From 07ef5dd3bc428996960a7227af1f5d6ca58fa56a Mon Sep 17 00:00:00 2001
From: Yongxuan Zhang <yongxuanzhang@google.com>
Date: Mon, 19 Dec 2022 19:14:06 +0000
Subject: [PATCH] [tep-0091] add kms libraries

This commit adds kms libraries into the trusted resources verifier package, so
that we can use kms for verification. Before this commit, we only supported
loading keys from files. (A sketch of the resulting verifier change follows
the file list below.)

Signed-off-by: Yongxuan Zhang <yongxuanzhang@google.com>
---
go.mod | 81 +- go.sum | 141 + pkg/trustedresources/verifier/verifier.go | 4 + .../go/compute/internal/version.go | 2 +- .../go/compute/metadata/CHANGES.md | 7 + .../go/compute/metadata/metadata.go | 1 + vendor/cloud.google.com/go/iam/CHANGES.md | 62 + vendor/cloud.google.com/go/iam/LICENSE | 202 + vendor/cloud.google.com/go/iam/README.md | 40 + .../go/iam/apiv1/iampb/iam_policy.pb.go | 672 + .../go/iam/apiv1/iampb/options.pb.go | 187 + .../go/iam/apiv1/iampb/policy.pb.go | 1169 ++ vendor/cloud.google.com/go/iam/iam.go | 387 + vendor/cloud.google.com/go/kms/LICENSE | 202 + vendor/cloud.google.com/go/kms/apiv1/doc.go | 178 + .../go/kms/apiv1/ekm_client.go | 647 + .../go/kms/apiv1/gapic_metadata.json | 227 + vendor/cloud.google.com/go/kms/apiv1/iam.go | 40 + .../go/kms/apiv1/key_management_client.go | 1800 +++ .../go/kms/apiv1/kmspb/ekm_service.pb.go | 1301 ++ .../go/kms/apiv1/kmspb/resources.pb.go | 2545 +++ .../go/kms/apiv1/kmspb/service.pb.go | 6284 ++++++++ .../cloud.google.com/go/kms/apiv1/version.go | 23 + .../go/kms/internal/version.go | 18 + .../services/keyvault/auth/auth.go | 65 + .../keyvault/v7.1/keyvault/CHANGELOG.md | 26 + .../services/keyvault/v7.1/keyvault/client.go | 7313 +++++++++ .../v7.1/keyvault/dataplane_meta.json | 11 + .../services/keyvault/v7.1/keyvault/enums.go | 231 + .../services/keyvault/v7.1/keyvault/models.go | 3611 +++++ .../keyvault/v7.1/keyvault/version.go | 19 + .../Azure/go-autorest/autorest/to/LICENSE | 191 + .../Azure/go-autorest/autorest/to/convert.go | 152 + .../autorest/to/go_mod_tidy_hack.go | 24 + .../go-autorest/autorest/validation/LICENSE | 191 + .../go-autorest/autorest/validation/error.go | 48 + .../autorest/validation/go_mod_tidy_hack.go | 24 + .../autorest/validation/validation.go | 406 + vendor/github.com/armon/go-metrics/.gitignore | 26 + .../github.com/armon/go-metrics/.travis.yml | 13 + vendor/github.com/armon/go-metrics/LICENSE | 20 + vendor/github.com/armon/go-metrics/README.md | 91 + .../github.com/armon/go-metrics/const_unix.go | 12 + .../armon/go-metrics/const_windows.go | 13 + vendor/github.com/armon/go-metrics/inmem.go | 339 + .../armon/go-metrics/inmem_endpoint.go | 162 + .../armon/go-metrics/inmem_signal.go | 117 + vendor/github.com/armon/go-metrics/metrics.go | 299 + vendor/github.com/armon/go-metrics/sink.go | 132 + vendor/github.com/armon/go-metrics/start.go | 158 + vendor/github.com/armon/go-metrics/statsd.go | 184 + .../github.com/armon/go-metrics/statsite.go | 172 + vendor/github.com/armon/go-radix/.gitignore | 22 + vendor/github.com/armon/go-radix/.travis.yml | 3 + vendor/github.com/armon/go-radix/LICENSE | 20 + vendor/github.com/armon/go-radix/README.md | 38 + vendor/github.com/armon/go-radix/radix.go | 540 + .../github.com/aws/aws-sdk-go-v2/CHANGELOG.md | 568 + vendor/github.com/aws/aws-sdk-go-v2/Makefile | 1 + .../aws-sdk-go-v2/aws/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/config/CHANGELOG.md | 4 + .../config/go_module_metadata.go | 2 +-
.../aws-sdk-go-v2/credentials/CHANGELOG.md | 4 + .../credentials/go_module_metadata.go | 2 +- .../feature/ec2/imds/CHANGELOG.md | 4 + .../feature/ec2/imds/go_module_metadata.go | 2 +- .../internal/configsources/CHANGELOG.md | 4 + .../configsources/go_module_metadata.go | 2 +- .../internal/endpoints/v2/CHANGELOG.md | 4 + .../endpoints/v2/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/internal/ini/CHANGELOG.md | 4 + .../internal/ini/go_module_metadata.go | 2 +- .../github.com/aws/aws-sdk-go-v2/modman.toml | 4 +- .../internal/presigned-url/CHANGELOG.md | 4 + .../presigned-url/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/service/kms/CHANGELOG.md | 230 + .../aws/aws-sdk-go-v2/service/kms/LICENSE.txt | 202 + .../aws-sdk-go-v2/service/kms/api_client.go | 434 + .../service/kms/api_op_CancelKeyDeletion.go | 144 + .../kms/api_op_ConnectCustomKeyStore.go | 187 + .../service/kms/api_op_CreateAlias.go | 184 + .../kms/api_op_CreateCustomKeyStore.go | 322 + .../service/kms/api_op_CreateGrant.go | 277 + .../service/kms/api_op_CreateKey.go | 505 + .../service/kms/api_op_Decrypt.go | 262 + .../service/kms/api_op_DeleteAlias.go | 147 + .../kms/api_op_DeleteCustomKeyStore.go | 164 + .../kms/api_op_DeleteImportedKeyMaterial.go | 145 + .../kms/api_op_DescribeCustomKeyStores.go | 287 + .../service/kms/api_op_DescribeKey.go | 211 + .../service/kms/api_op_DisableKey.go | 138 + .../service/kms/api_op_DisableKeyRotation.go | 167 + .../kms/api_op_DisconnectCustomKeyStore.go | 153 + .../service/kms/api_op_EnableKey.go | 135 + .../service/kms/api_op_EnableKeyRotation.go | 179 + .../service/kms/api_op_Encrypt.go | 260 + .../service/kms/api_op_GenerateDataKey.go | 266 + .../service/kms/api_op_GenerateDataKeyPair.go | 248 + ..._op_GenerateDataKeyPairWithoutPlaintext.go | 235 + .../api_op_GenerateDataKeyWithoutPlaintext.go | 231 + .../service/kms/api_op_GenerateMac.go | 181 + .../service/kms/api_op_GenerateRandom.go | 140 + .../service/kms/api_op_GetKeyPolicy.go | 140 + .../kms/api_op_GetKeyRotationStatus.go | 186 + .../kms/api_op_GetParametersForImport.go | 190 + .../service/kms/api_op_GetPublicKey.go | 235 + .../service/kms/api_op_ImportKeyMaterial.go | 223 + .../service/kms/api_op_ListAliases.go | 270 + .../service/kms/api_op_ListGrants.go | 283 + .../service/kms/api_op_ListKeyPolicies.go | 257 + .../service/kms/api_op_ListKeys.go | 242 + .../service/kms/api_op_ListResourceTags.go | 272 + .../service/kms/api_op_ListRetirableGrants.go | 274 + .../service/kms/api_op_PutKeyPolicy.go | 199 + .../service/kms/api_op_ReEncrypt.go | 329 + .../service/kms/api_op_ReplicateKey.go | 337 + .../service/kms/api_op_RetireGrant.go | 159 + .../service/kms/api_op_RevokeGrant.go | 161 + .../service/kms/api_op_ScheduleKeyDeletion.go | 207 + .../aws-sdk-go-v2/service/kms/api_op_Sign.go | 238 + .../service/kms/api_op_TagResource.go | 176 + .../service/kms/api_op_UntagResource.go | 161 + .../service/kms/api_op_UpdateAlias.go | 185 + .../kms/api_op_UpdateCustomKeyStore.go | 266 + .../kms/api_op_UpdateKeyDescription.go | 143 + .../service/kms/api_op_UpdatePrimaryRegion.go | 199 + .../service/kms/api_op_Verify.go | 222 + .../service/kms/api_op_VerifyMac.go | 183 + .../service/kms/deserializers.go | 12815 ++++++++++++++++ .../aws/aws-sdk-go-v2/service/kms/doc.go | 78 + .../aws-sdk-go-v2/service/kms/endpoints.go | 200 + .../aws-sdk-go-v2/service/kms/generated.json | 77 + .../service/kms/go_module_metadata.go | 6 + .../kms/internal/endpoints/endpoints.go | 860 ++ .../aws-sdk-go-v2/service/kms/serializers.go | 4148 +++++ 
.../aws-sdk-go-v2/service/kms/types/enums.go | 545 + .../aws-sdk-go-v2/service/kms/types/errors.go | 1122 ++ .../aws-sdk-go-v2/service/kms/types/types.go | 630 + .../aws-sdk-go-v2/service/kms/validators.go | 1933 +++ .../aws-sdk-go-v2/service/sso/CHANGELOG.md | 4 + .../service/sso/go_module_metadata.go | 2 +- .../service/ssooidc/CHANGELOG.md | 4 + .../service/ssooidc/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/service/sts/CHANGELOG.md | 4 + .../service/sts/go_module_metadata.go | 2 +- vendor/github.com/aws/smithy-go/CHANGELOG.md | 4 + vendor/github.com/aws/smithy-go/README.md | 2 +- .../aws/smithy-go/encoding/xml/doc.go | 2 +- .../aws/smithy-go/go_module_metadata.go | 2 +- .../github.com/cenkalti/backoff/v3/.gitignore | 22 + .../cenkalti/backoff/v3/.travis.yml | 10 + vendor/github.com/cenkalti/backoff/v3/LICENSE | 20 + .../github.com/cenkalti/backoff/v3/README.md | 33 + .../github.com/cenkalti/backoff/v3/backoff.go | 66 + .../github.com/cenkalti/backoff/v3/context.go | 66 + .../cenkalti/backoff/v3/exponential.go | 154 + .../github.com/cenkalti/backoff/v3/retry.go | 96 + .../github.com/cenkalti/backoff/v3/ticker.go | 94 + .../github.com/cenkalti/backoff/v3/timer.go | 35 + .../github.com/cenkalti/backoff/v3/tries.go | 35 + vendor/github.com/fatih/color/LICENSE.md | 20 + vendor/github.com/fatih/color/README.md | 178 + vendor/github.com/fatih/color/color.go | 618 + vendor/github.com/fatih/color/doc.go | 135 + .../golang/protobuf/ptypes/empty/empty.pb.go | 62 + vendor/github.com/golang/snappy/.gitignore | 16 + vendor/github.com/golang/snappy/AUTHORS | 18 + vendor/github.com/golang/snappy/CONTRIBUTORS | 41 + vendor/github.com/golang/snappy/LICENSE | 27 + vendor/github.com/golang/snappy/README | 107 + vendor/github.com/golang/snappy/decode.go | 264 + .../github.com/golang/snappy/decode_amd64.s | 490 + .../github.com/golang/snappy/decode_arm64.s | 494 + vendor/github.com/golang/snappy/decode_asm.go | 15 + .../github.com/golang/snappy/decode_other.go | 115 + vendor/github.com/golang/snappy/encode.go | 289 + .../github.com/golang/snappy/encode_amd64.s | 730 + .../github.com/golang/snappy/encode_arm64.s | 722 + vendor/github.com/golang/snappy/encode_asm.go | 30 + .../github.com/golang/snappy/encode_other.go | 238 + vendor/github.com/golang/snappy/snappy.go | 98 + .../enterprise-certificate-proxy/LICENSE | 202 + .../client/client.go | 168 + .../client/util/util.go | 71 + .../gax-go/v2/.release-please-manifest.json | 3 + .../googleapis/gax-go/v2/CHANGES.md | 47 + .../github.com/googleapis/gax-go/v2/LICENSE | 27 + .../googleapis/gax-go/v2/apierror/apierror.go | 342 + .../v2/apierror/internal/proto/README.md | 30 + .../internal/proto/custom_error.pb.go | 256 + .../internal/proto/custom_error.proto | 50 + .../v2/apierror/internal/proto/error.pb.go | 280 + .../v2/apierror/internal/proto/error.proto | 46 + .../googleapis/gax-go/v2/call_option.go | 244 + .../googleapis/gax-go/v2/content_type.go | 112 + vendor/github.com/googleapis/gax-go/v2/gax.go | 41 + .../github.com/googleapis/gax-go/v2/header.go | 53 + .../googleapis/gax-go/v2/internal/version.go | 33 + .../github.com/googleapis/gax-go/v2/invoke.go | 104 + .../googleapis/gax-go/v2/proto_json_stream.go | 126 + .../gax-go/v2/release-please-config.json | 10 + 
.../github.com/hashicorp/go-cleanhttp/LICENSE | 363 + .../hashicorp/go-cleanhttp/README.md | 30 + .../hashicorp/go-cleanhttp/cleanhttp.go | 58 + .../github.com/hashicorp/go-cleanhttp/doc.go | 20 + .../hashicorp/go-cleanhttp/handlers.go | 48 + .../github.com/hashicorp/go-hclog/.gitignore | 1 + vendor/github.com/hashicorp/go-hclog/LICENSE | 21 + .../github.com/hashicorp/go-hclog/README.md | 148 + .../hashicorp/go-hclog/colorize_unix.go | 29 + .../hashicorp/go-hclog/colorize_windows.go | 38 + .../github.com/hashicorp/go-hclog/context.go | 38 + .../github.com/hashicorp/go-hclog/exclude.go | 71 + .../github.com/hashicorp/go-hclog/global.go | 64 + .../hashicorp/go-hclog/interceptlogger.go | 204 + .../hashicorp/go-hclog/intlogger.go | 911 ++ .../github.com/hashicorp/go-hclog/logger.go | 373 + .../hashicorp/go-hclog/nulllogger.go | 58 + .../hashicorp/go-hclog/stacktrace.go | 109 + .../github.com/hashicorp/go-hclog/stdlog.go | 110 + .../github.com/hashicorp/go-hclog/writer.go | 82 + .../hashicorp/go-immutable-radix/.gitignore | 24 + .../hashicorp/go-immutable-radix/CHANGELOG.md | 23 + .../hashicorp/go-immutable-radix/LICENSE | 363 + .../hashicorp/go-immutable-radix/README.md | 66 + .../hashicorp/go-immutable-radix/edges.go | 21 + .../hashicorp/go-immutable-radix/iradix.go | 676 + .../hashicorp/go-immutable-radix/iter.go | 205 + .../hashicorp/go-immutable-radix/node.go | 334 + .../hashicorp/go-immutable-radix/raw_iter.go | 78 + .../go-immutable-radix/reverse_iter.go | 239 + .../github.com/hashicorp/go-plugin/.gitignore | 2 + .../hashicorp/go-plugin/CHANGELOG.md | 25 + vendor/github.com/hashicorp/go-plugin/LICENSE | 355 + .../github.com/hashicorp/go-plugin/README.md | 164 + .../github.com/hashicorp/go-plugin/client.go | 1055 ++ .../hashicorp/go-plugin/discover.go | 28 + .../github.com/hashicorp/go-plugin/error.go | 24 + .../hashicorp/go-plugin/grpc_broker.go | 457 + .../hashicorp/go-plugin/grpc_client.go | 126 + .../hashicorp/go-plugin/grpc_controller.go | 23 + .../hashicorp/go-plugin/grpc_server.go | 161 + .../hashicorp/go-plugin/grpc_stdio.go | 207 + .../go-plugin/internal/plugin/gen.go | 3 + .../internal/plugin/grpc_broker.pb.go | 203 + .../internal/plugin/grpc_broker.proto | 13 + .../internal/plugin/grpc_controller.pb.go | 145 + .../internal/plugin/grpc_controller.proto | 11 + .../internal/plugin/grpc_stdio.pb.go | 233 + .../internal/plugin/grpc_stdio.proto | 30 + .../hashicorp/go-plugin/log_entry.go | 73 + vendor/github.com/hashicorp/go-plugin/mtls.go | 73 + .../hashicorp/go-plugin/mux_broker.go | 204 + .../github.com/hashicorp/go-plugin/plugin.go | 58 + .../github.com/hashicorp/go-plugin/process.go | 24 + .../hashicorp/go-plugin/process_posix.go | 20 + .../hashicorp/go-plugin/process_windows.go | 30 + .../hashicorp/go-plugin/protocol.go | 45 + .../hashicorp/go-plugin/rpc_client.go | 170 + .../hashicorp/go-plugin/rpc_server.go | 206 + .../github.com/hashicorp/go-plugin/server.go | 591 + .../hashicorp/go-plugin/server_mux.go | 31 + .../github.com/hashicorp/go-plugin/stream.go | 18 + .../github.com/hashicorp/go-plugin/testing.go | 180 + .../hashicorp/go-retryablehttp/.gitignore | 4 + .../hashicorp/go-retryablehttp/LICENSE | 363 + .../hashicorp/go-retryablehttp/Makefile | 11 + .../hashicorp/go-retryablehttp/README.md | 81 + .../hashicorp/go-retryablehttp/client.go | 815 + .../go-retryablehttp/roundtripper.go | 52 + 
.../hashicorp/go-rootcerts/.travis.yml | 12 + .../github.com/hashicorp/go-rootcerts/LICENSE | 363 + .../hashicorp/go-rootcerts/Makefile | 8 + .../hashicorp/go-rootcerts/README.md | 44 + .../github.com/hashicorp/go-rootcerts/doc.go | 9 + .../hashicorp/go-rootcerts/rootcerts.go | 123 + .../hashicorp/go-rootcerts/rootcerts_base.go | 12 + .../go-rootcerts/rootcerts_darwin.go | 48 + .../hashicorp/go-secure-stdlib/mlock/LICENSE | 363 + .../hashicorp/go-secure-stdlib/mlock/mlock.go | 15 + .../go-secure-stdlib/mlock/mlock_unavail.go | 13 + .../go-secure-stdlib/mlock/mlock_unix.go | 18 + .../go-secure-stdlib/parseutil/LICENSE | 363 + .../go-secure-stdlib/parseutil/parsepath.go | 65 + .../go-secure-stdlib/parseutil/parseutil.go | 502 + .../go-secure-stdlib/strutil/LICENSE | 363 + .../go-secure-stdlib/strutil/strutil.go | 510 + .../hashicorp/go-sockaddr/.gitignore | 26 + .../hashicorp/go-sockaddr/GNUmakefile | 65 + .../github.com/hashicorp/go-sockaddr/LICENSE | 373 + .../hashicorp/go-sockaddr/README.md | 118 + .../github.com/hashicorp/go-sockaddr/doc.go | 5 + .../hashicorp/go-sockaddr/ifaddr.go | 254 + .../hashicorp/go-sockaddr/ifaddrs.go | 1304 ++ .../hashicorp/go-sockaddr/ifattr.go | 65 + .../hashicorp/go-sockaddr/ipaddr.go | 169 + .../hashicorp/go-sockaddr/ipaddrs.go | 98 + .../hashicorp/go-sockaddr/ipv4addr.go | 516 + .../hashicorp/go-sockaddr/ipv6addr.go | 591 + .../github.com/hashicorp/go-sockaddr/rfc.go | 948 ++ .../hashicorp/go-sockaddr/route_info.go | 19 + .../go-sockaddr/route_info_android.go | 34 + .../hashicorp/go-sockaddr/route_info_bsd.go | 36 + .../go-sockaddr/route_info_default.go | 10 + .../hashicorp/go-sockaddr/route_info_linux.go | 42 + .../go-sockaddr/route_info_solaris.go | 37 + .../go-sockaddr/route_info_windows.go | 41 + .../hashicorp/go-sockaddr/sockaddr.go | 206 + .../hashicorp/go-sockaddr/sockaddrs.go | 193 + .../hashicorp/go-sockaddr/unixsock.go | 135 + .../github.com/hashicorp/go-uuid/.travis.yml | 12 + vendor/github.com/hashicorp/go-uuid/LICENSE | 365 + vendor/github.com/hashicorp/go-uuid/README.md | 8 + vendor/github.com/hashicorp/go-uuid/uuid.go | 83 + vendor/github.com/hashicorp/hcl/.gitignore | 9 + vendor/github.com/hashicorp/hcl/.travis.yml | 13 + vendor/github.com/hashicorp/hcl/LICENSE | 354 + vendor/github.com/hashicorp/hcl/Makefile | 18 + vendor/github.com/hashicorp/hcl/README.md | 125 + vendor/github.com/hashicorp/hcl/appveyor.yml | 19 + vendor/github.com/hashicorp/hcl/decoder.go | 729 + vendor/github.com/hashicorp/hcl/hcl.go | 11 + .../github.com/hashicorp/hcl/hcl/ast/ast.go | 219 + .../github.com/hashicorp/hcl/hcl/ast/walk.go | 52 + .../hashicorp/hcl/hcl/parser/error.go | 17 + .../hashicorp/hcl/hcl/parser/parser.go | 532 + .../hashicorp/hcl/hcl/scanner/scanner.go | 652 + .../hashicorp/hcl/hcl/strconv/quote.go | 241 + .../hashicorp/hcl/hcl/token/position.go | 46 + .../hashicorp/hcl/hcl/token/token.go | 219 + .../hashicorp/hcl/json/parser/flatten.go | 117 + .../hashicorp/hcl/json/parser/parser.go | 313 + .../hashicorp/hcl/json/scanner/scanner.go | 451 + .../hashicorp/hcl/json/token/position.go | 46 + .../hashicorp/hcl/json/token/token.go | 118 + vendor/github.com/hashicorp/hcl/lex.go | 38 + vendor/github.com/hashicorp/hcl/parse.go | 39 + vendor/github.com/hashicorp/vault/api/LICENSE | 365 + .../github.com/hashicorp/vault/api/README.md | 9 + 
vendor/github.com/hashicorp/vault/api/auth.go | 112 + .../hashicorp/vault/api/auth_token.go | 374 + .../github.com/hashicorp/vault/api/client.go | 1795 +++ vendor/github.com/hashicorp/vault/api/help.go | 37 + vendor/github.com/hashicorp/vault/api/kv.go | 56 + .../github.com/hashicorp/vault/api/kv_v1.go | 57 + .../github.com/hashicorp/vault/api/kv_v2.go | 778 + .../hashicorp/vault/api/lifetime_watcher.go | 403 + .../github.com/hashicorp/vault/api/logical.go | 366 + .../hashicorp/vault/api/output_policy.go | 82 + .../hashicorp/vault/api/output_string.go | 95 + .../hashicorp/vault/api/plugin_helpers.go | 267 + .../github.com/hashicorp/vault/api/request.go | 148 + .../hashicorp/vault/api/response.go | 133 + .../github.com/hashicorp/vault/api/secret.go | 320 + vendor/github.com/hashicorp/vault/api/ssh.go | 75 + .../hashicorp/vault/api/ssh_agent.go | 247 + vendor/github.com/hashicorp/vault/api/sys.go | 11 + .../hashicorp/vault/api/sys_audit.go | 156 + .../hashicorp/vault/api/sys_auth.go | 98 + .../hashicorp/vault/api/sys_capabilities.go | 77 + .../hashicorp/vault/api/sys_config_cors.go | 91 + .../hashicorp/vault/api/sys_generate_root.go | 195 + .../hashicorp/vault/api/sys_hastatus.go | 43 + .../hashicorp/vault/api/sys_health.go | 49 + .../hashicorp/vault/api/sys_init.go | 74 + .../hashicorp/vault/api/sys_leader.go | 41 + .../hashicorp/vault/api/sys_leases.go | 163 + .../github.com/hashicorp/vault/api/sys_mfa.go | 45 + .../hashicorp/vault/api/sys_monitor.go | 73 + .../hashicorp/vault/api/sys_mounts.go | 320 + .../hashicorp/vault/api/sys_plugins.go | 380 + .../hashicorp/vault/api/sys_policy.go | 134 + .../hashicorp/vault/api/sys_raft.go | 382 + .../hashicorp/vault/api/sys_rekey.go | 479 + .../hashicorp/vault/api/sys_rotate.go | 102 + .../hashicorp/vault/api/sys_seal.go | 118 + .../hashicorp/vault/api/sys_stepdown.go | 23 + vendor/github.com/hashicorp/vault/sdk/LICENSE | 365 + .../vault/sdk/helper/certutil/helpers.go | 1386 ++ .../vault/sdk/helper/certutil/types.go | 1015 ++ .../vault/sdk/helper/compressutil/compress.go | 222 + .../vault/sdk/helper/consts/agent.go | 12 + .../vault/sdk/helper/consts/consts.go | 37 + .../sdk/helper/consts/deprecation_status.go | 31 + .../vault/sdk/helper/consts/error.go | 25 + .../vault/sdk/helper/consts/plugin_types.go | 59 + .../vault/sdk/helper/consts/replication.go | 159 + .../vault/sdk/helper/consts/token_consts.go | 10 + .../vault/sdk/helper/cryptoutil/cryptoutil.go | 11 + .../vault/sdk/helper/errutil/error.go | 20 + .../hashicorp/vault/sdk/helper/hclutil/hcl.go | 36 + .../vault/sdk/helper/jsonutil/json.go | 100 + .../vault/sdk/helper/license/feature.go | 10 + .../vault/sdk/helper/locksutil/locks.go | 58 + .../vault/sdk/helper/logging/logging.go | 81 + .../sdk/helper/pathmanager/pathmanager.go | 136 + .../vault/sdk/helper/pluginutil/env.go | 77 + .../sdk/helper/pluginutil/multiplexing.go | 80 + .../sdk/helper/pluginutil/multiplexing.pb.go | 213 + .../sdk/helper/pluginutil/multiplexing.proto | 13 + .../helper/pluginutil/multiplexing_grpc.pb.go | 101 + .../vault/sdk/helper/pluginutil/run_config.go | 179 + .../vault/sdk/helper/pluginutil/runner.go | 115 + .../vault/sdk/helper/pluginutil/tls.go | 106 + .../vault/sdk/helper/strutil/strutil.go | 94 + .../vault/sdk/helper/wrapping/wrapinfo.go | 37 + .../hashicorp/vault/sdk/logical/audit.go | 19 + .../hashicorp/vault/sdk/logical/auth.go | 129 + .../hashicorp/vault/sdk/logical/connection.go | 18 + 
.../vault/sdk/logical/controlgroup.go | 17 + .../hashicorp/vault/sdk/logical/error.go | 122 + .../vault/sdk/logical/identity.pb.go | 709 + .../vault/sdk/logical/identity.proto | 91 + .../hashicorp/vault/sdk/logical/lease.go | 53 + .../hashicorp/vault/sdk/logical/logical.go | 156 + .../vault/sdk/logical/logical_storage.go | 52 + .../vault/sdk/logical/managed_key.go | 97 + .../hashicorp/vault/sdk/logical/plugin.pb.go | 171 + .../hashicorp/vault/sdk/logical/plugin.proto | 16 + .../hashicorp/vault/sdk/logical/request.go | 394 + .../hashicorp/vault/sdk/logical/response.go | 322 + .../vault/sdk/logical/response_util.go | 204 + .../hashicorp/vault/sdk/logical/secret.go | 30 + .../hashicorp/vault/sdk/logical/storage.go | 158 + .../vault/sdk/logical/storage_inmem.go | 87 + .../vault/sdk/logical/storage_view.go | 110 + .../vault/sdk/logical/system_view.go | 235 + .../hashicorp/vault/sdk/logical/testing.go | 87 + .../hashicorp/vault/sdk/logical/token.go | 304 + .../vault/sdk/logical/translate_response.go | 161 + .../hashicorp/vault/sdk/logical/version.pb.go | 204 + .../hashicorp/vault/sdk/logical/version.proto | 17 + .../vault/sdk/logical/version_grpc.pb.go | 103 + .../hashicorp/vault/sdk/physical/cache.go | 260 + .../hashicorp/vault/sdk/physical/encoding.go | 108 + .../hashicorp/vault/sdk/physical/entry.go | 20 + .../hashicorp/vault/sdk/physical/error.go | 110 + .../vault/sdk/physical/inmem/inmem.go | 310 + .../vault/sdk/physical/inmem/inmem_ha.go | 167 + .../hashicorp/vault/sdk/physical/latency.go | 113 + .../hashicorp/vault/sdk/physical/physical.go | 134 + .../vault/sdk/physical/physical_access.go | 40 + .../vault/sdk/physical/physical_view.go | 94 + .../hashicorp/vault/sdk/physical/testing.go | 497 + .../vault/sdk/physical/transactions.go | 150 + .../hashicorp/vault/sdk/version/cgo.go | 7 + .../hashicorp/vault/sdk/version/version.go | 80 + .../vault/sdk/version/version_base.go | 17 + vendor/github.com/hashicorp/yamux/.gitignore | 23 + vendor/github.com/hashicorp/yamux/LICENSE | 362 + vendor/github.com/hashicorp/yamux/README.md | 86 + vendor/github.com/hashicorp/yamux/addr.go | 60 + vendor/github.com/hashicorp/yamux/const.go | 182 + vendor/github.com/hashicorp/yamux/mux.go | 114 + vendor/github.com/hashicorp/yamux/session.go | 732 + vendor/github.com/hashicorp/yamux/spec.md | 140 + vendor/github.com/hashicorp/yamux/stream.go | 544 + vendor/github.com/hashicorp/yamux/util.go | 43 + .../jellydator/ttlcache/v2/CHANGELOG.md | 94 + .../github.com/jellydator/ttlcache/v2/LICENSE | 21 + .../jellydator/ttlcache/v2/Readme.md | 145 + .../jellydator/ttlcache/v2/cache.go | 605 + .../ttlcache/v2/evictionreason_enumer.go | 52 + .../github.com/jellydator/ttlcache/v2/item.go | 46 + .../jellydator/ttlcache/v2/metrics.go | 15 + .../jellydator/ttlcache/v2/priority_queue.go | 85 + vendor/github.com/mattn/go-colorable/LICENSE | 21 + .../github.com/mattn/go-colorable/README.md | 48 + .../mattn/go-colorable/colorable_appengine.go | 38 + .../mattn/go-colorable/colorable_others.go | 38 + .../mattn/go-colorable/colorable_windows.go | 1047 ++ .../github.com/mattn/go-colorable/go.test.sh | 12 + .../mattn/go-colorable/noncolorable.go | 57 + vendor/github.com/mattn/go-isatty/LICENSE | 9 + vendor/github.com/mattn/go-isatty/README.md | 50 + vendor/github.com/mattn/go-isatty/doc.go | 2 + vendor/github.com/mattn/go-isatty/go.test.sh | 12 + 
.../github.com/mattn/go-isatty/isatty_bsd.go | 19 + .../mattn/go-isatty/isatty_others.go | 16 + .../mattn/go-isatty/isatty_plan9.go | 23 + .../mattn/go-isatty/isatty_solaris.go | 21 + .../mattn/go-isatty/isatty_tcgets.go | 19 + .../mattn/go-isatty/isatty_windows.go | 125 + .../go-testing-interface/.travis.yml | 12 + .../mitchellh/go-testing-interface/LICENSE | 21 + .../mitchellh/go-testing-interface/README.md | 60 + .../mitchellh/go-testing-interface/testing.go | 112 + .../mitchellh/mapstructure/CHANGELOG.md | 96 + .../github.com/mitchellh/mapstructure/LICENSE | 21 + .../mitchellh/mapstructure/README.md | 46 + .../mitchellh/mapstructure/decode_hooks.go | 279 + .../mitchellh/mapstructure/error.go | 50 + .../mitchellh/mapstructure/mapstructure.go | 1540 ++ vendor/github.com/oklog/run/.gitignore | 14 + vendor/github.com/oklog/run/LICENSE | 201 + vendor/github.com/oklog/run/README.md | 75 + vendor/github.com/oklog/run/actors.go | 38 + vendor/github.com/oklog/run/group.go | 62 + vendor/github.com/pierrec/lz4/.gitignore | 34 + vendor/github.com/pierrec/lz4/.travis.yml | 24 + vendor/github.com/pierrec/lz4/LICENSE | 28 + vendor/github.com/pierrec/lz4/README.md | 90 + vendor/github.com/pierrec/lz4/block.go | 413 + vendor/github.com/pierrec/lz4/debug.go | 23 + vendor/github.com/pierrec/lz4/debug_stub.go | 7 + vendor/github.com/pierrec/lz4/decode_amd64.go | 8 + vendor/github.com/pierrec/lz4/decode_amd64.s | 375 + vendor/github.com/pierrec/lz4/decode_other.go | 98 + vendor/github.com/pierrec/lz4/errors.go | 30 + .../pierrec/lz4/internal/xxh32/xxh32zero.go | 223 + vendor/github.com/pierrec/lz4/lz4.go | 116 + vendor/github.com/pierrec/lz4/lz4_go1.10.go | 29 + .../github.com/pierrec/lz4/lz4_notgo1.10.go | 29 + vendor/github.com/pierrec/lz4/reader.go | 335 + .../github.com/pierrec/lz4/reader_legacy.go | 207 + vendor/github.com/pierrec/lz4/writer.go | 422 + .../github.com/pierrec/lz4/writer_legacy.go | 182 + .../github.com/ryanuber/go-glob/.travis.yml | 5 + vendor/github.com/ryanuber/go-glob/LICENSE | 21 + vendor/github.com/ryanuber/go-glob/README.md | 29 + vendor/github.com/ryanuber/go-glob/glob.go | 56 + .../sigstore/sigstore/pkg/cryptoutils/sans.go | 149 + .../sigstore/pkg/signature/kms/aws/client.go | 374 + .../sigstore/pkg/signature/kms/aws/doc.go | 17 + .../sigstore/pkg/signature/kms/aws/signer.go | 243 + .../pkg/signature/kms/azure/README.md | 55 + .../pkg/signature/kms/azure/client.go | 342 + .../sigstore/pkg/signature/kms/azure/doc.go | 17 + .../pkg/signature/kms/azure/signer.go | 237 + .../sigstore/pkg/signature/kms/doc.go | 17 + .../sigstore/pkg/signature/kms/gcp/client.go | 418 + .../sigstore/pkg/signature/kms/gcp/doc.go | 17 + .../sigstore/pkg/signature/kms/gcp/signer.go | 195 + .../pkg/signature/kms/hashivault/client.go | 381 + .../pkg/signature/kms/hashivault/doc.go | 17 + .../pkg/signature/kms/hashivault/signer.go | 233 + .../sigstore/pkg/signature/kms/kms.go | 78 + vendor/golang.org/x/crypto/blake2b/blake2b.go | 291 + .../x/crypto/blake2b/blake2bAVX2_amd64.go | 38 + .../x/crypto/blake2b/blake2bAVX2_amd64.s | 745 + .../x/crypto/blake2b/blake2b_amd64.go | 25 + .../x/crypto/blake2b/blake2b_amd64.s | 279 + .../x/crypto/blake2b/blake2b_generic.go | 182 + .../x/crypto/blake2b/blake2b_ref.go | 12 + vendor/golang.org/x/crypto/blake2b/blake2x.go | 177 + 
.../golang.org/x/crypto/blake2b/register.go | 33 + vendor/golang.org/x/crypto/cryptobyte/asn1.go | 816 + .../x/crypto/cryptobyte/asn1/asn1.go | 46 + .../golang.org/x/crypto/cryptobyte/builder.go | 342 + .../golang.org/x/crypto/cryptobyte/string.go | 172 + vendor/golang.org/x/net/http2/hpack/encode.go | 5 + vendor/golang.org/x/net/http2/server.go | 34 +- vendor/golang.org/x/net/http2/transport.go | 78 +- .../google/internal/externalaccount/aws.go | 105 +- .../externalaccount/basecredentials.go | 4 + .../x/sync/singleflight/singleflight.go | 205 + .../golang.org/x/sys/execabs/execabs_go119.go | 8 +- .../x/sys/windows/syscall_windows.go | 1 + .../x/sys/windows/zsyscall_windows.go | 7 + .../golang.org/x/text/unicode/bidi/trieval.go | 12 - .../api/googleapi/googleapi.go | 480 + .../google.golang.org/api/googleapi/types.go | 202 + .../api/internal/conn_pool.go | 30 + .../google.golang.org/api/internal/creds.go | 159 + .../api/internal/impersonate/impersonate.go | 128 + .../api/internal/settings.go | 142 + .../internal/third_party/uritemplates/LICENSE | 27 + .../third_party/uritemplates/METADATA | 14 + .../third_party/uritemplates/uritemplates.go | 248 + .../third_party/uritemplates/utils.go | 17 + .../google.golang.org/api/internal/version.go | 8 + .../api/iterator/iterator.go | 227 + .../option/internaloption/internaloption.go | 136 + vendor/google.golang.org/api/option/option.go | 345 + .../api/transport/cert/default_cert.go | 58 + .../api/transport/cert/enterprise_cert.go | 56 + .../api/transport/cert/secureconnect_cert.go | 123 + .../api/transport/grpc/dial.go | 333 + .../api/transport/grpc/dial_appengine.go | 32 + .../api/transport/grpc/dial_socketopt.go | 52 + .../api/transport/grpc/pool.go | 92 + .../api/transport/internal/dca/dca.go | 143 + .../internal/socket/socket_service.pb.go | 2822 ++++ .../internal/socket/socket_service.proto | 460 + .../google.golang.org/appengine/socket/doc.go | 10 + .../appengine/socket/socket_classic.go | 290 + .../appengine/socket/socket_vm.go | 64 + .../api/annotations/annotations.pb.go | 119 + .../googleapis/api/annotations/client.pb.go | 1652 ++ .../api/annotations/field_behavior.pb.go | 250 + .../googleapis/api/annotations/http.pb.go | 777 + .../googleapis/api/annotations/resource.pb.go | 655 + .../googleapis/api/annotations/routing.pb.go | 693 + .../googleapis/api/launch_stage.pb.go | 203 + .../genproto/googleapis/cloud/kms/v1/alias.go | 652 + .../googleapis/cloud/location/locations.pb.go | 631 + .../genproto/googleapis/iam/v1/alias.go | 208 + .../genproto/googleapis/rpc/code/code.pb.go | 335 + .../rpc/errdetails/error_details.pb.go | 1278 ++ .../genproto/googleapis/type/expr/expr.pb.go | 232 + .../grpclb/grpc_lb_v1/load_balancer.pb.go | 962 ++ .../grpc_lb_v1/load_balancer_grpc.pb.go | 156 + .../grpc/balancer/grpclb/grpclb.go | 520 + .../grpc/balancer/grpclb/grpclb_config.go | 67 + .../grpc/balancer/grpclb/grpclb_picker.go | 202 + .../balancer/grpclb/grpclb_remote_balancer.go | 449 + .../grpc/balancer/grpclb/grpclb_util.go | 208 + .../grpc/credentials/alts/alts.go | 332 + .../alts/internal/authinfo/authinfo.go | 95 + .../grpc/credentials/alts/internal/common.go | 67 + .../alts/internal/conn/aeadrekey.go | 131 + .../alts/internal/conn/aes128gcm.go | 105 + .../alts/internal/conn/aes128gcmrekey.go | 116 + .../credentials/alts/internal/conn/common.go | 70 + .../credentials/alts/internal/conn/counter.go | 62 + .../credentials/alts/internal/conn/record.go | 275 + .../credentials/alts/internal/conn/utils.go | 63 + .../alts/internal/handshaker/handshaker.go | 
375 + .../internal/handshaker/service/service.go | 60 + .../internal/proto/grpc_gcp/altscontext.pb.go | 264 + .../internal/proto/grpc_gcp/handshaker.pb.go | 1428 ++ .../proto/grpc_gcp/handshaker_grpc.pb.go | 166 + .../grpc_gcp/transport_security_common.pb.go | 326 + .../grpc/credentials/alts/utils.go | 70 + .../grpc/credentials/google/google.go | 145 + .../grpc/credentials/google/xds.go | 128 + .../grpc/credentials/oauth/oauth.go | 242 + .../google.golang.org/grpc/health/client.go | 117 + .../grpc/health/grpc_health_v1/health.pb.go | 313 + .../health/grpc_health_v1/health_grpc.pb.go | 218 + .../google.golang.org/grpc/health/logging.go | 23 + .../google.golang.org/grpc/health/server.go | 163 + .../grpc/internal/googlecloud/googlecloud.go | 72 + .../grpc/internal/googlecloud/manufacturer.go | 26 + .../googlecloud/manufacturer_linux.go | 27 + .../googlecloud/manufacturer_windows.go | 50 + .../grpc/reflection/README.md | 18 + .../grpc_reflection_v1alpha/reflection.pb.go | 955 ++ .../grpc_reflection_v1alpha/reflection.proto | 138 + .../reflection_grpc.pb.go | 155 + .../grpc/reflection/serverreflection.go | 324 + vendor/modules.txt | 235 +- 642 files changed, 158036 insertions(+), 110 deletions(-) create mode 100644 vendor/cloud.google.com/go/iam/CHANGES.md create mode 100644 vendor/cloud.google.com/go/iam/LICENSE create mode 100644 vendor/cloud.google.com/go/iam/README.md create mode 100644 vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go create mode 100644 vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go create mode 100644 vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go create mode 100644 vendor/cloud.google.com/go/iam/iam.go create mode 100644 vendor/cloud.google.com/go/kms/LICENSE create mode 100644 vendor/cloud.google.com/go/kms/apiv1/doc.go create mode 100644 vendor/cloud.google.com/go/kms/apiv1/ekm_client.go create mode 100644 vendor/cloud.google.com/go/kms/apiv1/gapic_metadata.json create mode 100644 vendor/cloud.google.com/go/kms/apiv1/iam.go create mode 100644 vendor/cloud.google.com/go/kms/apiv1/key_management_client.go create mode 100644 vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go create mode 100644 vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go create mode 100644 vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go create mode 100644 vendor/cloud.google.com/go/kms/apiv1/version.go create mode 100644 vendor/cloud.google.com/go/kms/internal/version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/auth/auth.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/CHANGELOG.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/dataplane_meta.json create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/enums.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/version.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/to/LICENSE create mode 100644 vendor/github.com/Azure/go-autorest/autorest/to/convert.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go create mode 100644 
vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE create mode 100644 vendor/github.com/Azure/go-autorest/autorest/validation/error.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/validation/validation.go create mode 100644 vendor/github.com/armon/go-metrics/.gitignore create mode 100644 vendor/github.com/armon/go-metrics/.travis.yml create mode 100644 vendor/github.com/armon/go-metrics/LICENSE create mode 100644 vendor/github.com/armon/go-metrics/README.md create mode 100644 vendor/github.com/armon/go-metrics/const_unix.go create mode 100644 vendor/github.com/armon/go-metrics/const_windows.go create mode 100644 vendor/github.com/armon/go-metrics/inmem.go create mode 100644 vendor/github.com/armon/go-metrics/inmem_endpoint.go create mode 100644 vendor/github.com/armon/go-metrics/inmem_signal.go create mode 100644 vendor/github.com/armon/go-metrics/metrics.go create mode 100644 vendor/github.com/armon/go-metrics/sink.go create mode 100644 vendor/github.com/armon/go-metrics/start.go create mode 100644 vendor/github.com/armon/go-metrics/statsd.go create mode 100644 vendor/github.com/armon/go-metrics/statsite.go create mode 100644 vendor/github.com/armon/go-radix/.gitignore create mode 100644 vendor/github.com/armon/go-radix/.travis.yml create mode 100644 vendor/github.com/armon/go-radix/LICENSE create mode 100644 vendor/github.com/armon/go-radix/README.md create mode 100644 vendor/github.com/armon/go-radix/radix.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/LICENSE.txt create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go create mode 
100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go create mode 100644 
vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/generated.json create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/serializers.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/enums.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/types.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/validators.go create mode 100644 vendor/github.com/cenkalti/backoff/v3/.gitignore create mode 100644 vendor/github.com/cenkalti/backoff/v3/.travis.yml create mode 100644 vendor/github.com/cenkalti/backoff/v3/LICENSE create mode 100644 vendor/github.com/cenkalti/backoff/v3/README.md create mode 100644 vendor/github.com/cenkalti/backoff/v3/backoff.go create mode 100644 vendor/github.com/cenkalti/backoff/v3/context.go create mode 100644 vendor/github.com/cenkalti/backoff/v3/exponential.go create mode 100644 vendor/github.com/cenkalti/backoff/v3/retry.go create mode 100644 vendor/github.com/cenkalti/backoff/v3/ticker.go create mode 100644 vendor/github.com/cenkalti/backoff/v3/timer.go create mode 100644 vendor/github.com/cenkalti/backoff/v3/tries.go create mode 100644 vendor/github.com/fatih/color/LICENSE.md create mode 100644 vendor/github.com/fatih/color/README.md create mode 100644 vendor/github.com/fatih/color/color.go create mode 100644 vendor/github.com/fatih/color/doc.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go create mode 100644 vendor/github.com/golang/snappy/.gitignore create mode 100644 vendor/github.com/golang/snappy/AUTHORS create mode 100644 vendor/github.com/golang/snappy/CONTRIBUTORS create mode 100644 vendor/github.com/golang/snappy/LICENSE create mode 100644 vendor/github.com/golang/snappy/README create mode 100644 vendor/github.com/golang/snappy/decode.go create mode 100644 vendor/github.com/golang/snappy/decode_amd64.s create mode 100644 vendor/github.com/golang/snappy/decode_arm64.s create mode 100644 vendor/github.com/golang/snappy/decode_asm.go create mode 100644 vendor/github.com/golang/snappy/decode_other.go create mode 100644 vendor/github.com/golang/snappy/encode.go create mode 100644 vendor/github.com/golang/snappy/encode_amd64.s create mode 100644 vendor/github.com/golang/snappy/encode_arm64.s create mode 100644 vendor/github.com/golang/snappy/encode_asm.go create mode 100644 vendor/github.com/golang/snappy/encode_other.go create mode 100644 vendor/github.com/golang/snappy/snappy.go create mode 100644 vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE create mode 100644 vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go create mode 100644 
vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json create mode 100644 vendor/github.com/googleapis/gax-go/v2/CHANGES.md create mode 100644 vendor/github.com/googleapis/gax-go/v2/LICENSE create mode 100644 vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md create mode 100644 vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto create mode 100644 vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto create mode 100644 vendor/github.com/googleapis/gax-go/v2/call_option.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/content_type.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/gax.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/header.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/internal/version.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/invoke.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/release-please-config.json create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/LICENSE create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/README.md create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/doc.go create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/handlers.go create mode 100644 vendor/github.com/hashicorp/go-hclog/.gitignore create mode 100644 vendor/github.com/hashicorp/go-hclog/LICENSE create mode 100644 vendor/github.com/hashicorp/go-hclog/README.md create mode 100644 vendor/github.com/hashicorp/go-hclog/colorize_unix.go create mode 100644 vendor/github.com/hashicorp/go-hclog/colorize_windows.go create mode 100644 vendor/github.com/hashicorp/go-hclog/context.go create mode 100644 vendor/github.com/hashicorp/go-hclog/exclude.go create mode 100644 vendor/github.com/hashicorp/go-hclog/global.go create mode 100644 vendor/github.com/hashicorp/go-hclog/interceptlogger.go create mode 100644 vendor/github.com/hashicorp/go-hclog/intlogger.go create mode 100644 vendor/github.com/hashicorp/go-hclog/logger.go create mode 100644 vendor/github.com/hashicorp/go-hclog/nulllogger.go create mode 100644 vendor/github.com/hashicorp/go-hclog/stacktrace.go create mode 100644 vendor/github.com/hashicorp/go-hclog/stdlog.go create mode 100644 vendor/github.com/hashicorp/go-hclog/writer.go create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/.gitignore create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/LICENSE create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/README.md create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/edges.go 
create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/iradix.go create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/iter.go create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/node.go create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go create mode 100644 vendor/github.com/hashicorp/go-plugin/.gitignore create mode 100644 vendor/github.com/hashicorp/go-plugin/CHANGELOG.md create mode 100644 vendor/github.com/hashicorp/go-plugin/LICENSE create mode 100644 vendor/github.com/hashicorp/go-plugin/README.md create mode 100644 vendor/github.com/hashicorp/go-plugin/client.go create mode 100644 vendor/github.com/hashicorp/go-plugin/discover.go create mode 100644 vendor/github.com/hashicorp/go-plugin/error.go create mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_broker.go create mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_client.go create mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_controller.go create mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_server.go create mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_stdio.go create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto create mode 100644 vendor/github.com/hashicorp/go-plugin/log_entry.go create mode 100644 vendor/github.com/hashicorp/go-plugin/mtls.go create mode 100644 vendor/github.com/hashicorp/go-plugin/mux_broker.go create mode 100644 vendor/github.com/hashicorp/go-plugin/plugin.go create mode 100644 vendor/github.com/hashicorp/go-plugin/process.go create mode 100644 vendor/github.com/hashicorp/go-plugin/process_posix.go create mode 100644 vendor/github.com/hashicorp/go-plugin/process_windows.go create mode 100644 vendor/github.com/hashicorp/go-plugin/protocol.go create mode 100644 vendor/github.com/hashicorp/go-plugin/rpc_client.go create mode 100644 vendor/github.com/hashicorp/go-plugin/rpc_server.go create mode 100644 vendor/github.com/hashicorp/go-plugin/server.go create mode 100644 vendor/github.com/hashicorp/go-plugin/server_mux.go create mode 100644 vendor/github.com/hashicorp/go-plugin/stream.go create mode 100644 vendor/github.com/hashicorp/go-plugin/testing.go create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/.gitignore create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/LICENSE create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/Makefile create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/README.md create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/client.go create mode 
100644 vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go
 create mode 100644 vendor/github.com/hashicorp/go-rootcerts/.travis.yml
 create mode 100644 vendor/github.com/hashicorp/go-rootcerts/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-rootcerts/Makefile
 create mode 100644 vendor/github.com/hashicorp/go-rootcerts/README.md
 create mode 100644 vendor/github.com/hashicorp/go-rootcerts/doc.go
 create mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
 create mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
 create mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go
 create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/mlock/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock.go
 create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unavail.go
 create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unix.go
 create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/parseutil/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parsepath.go
 create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parseutil.go
 create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/strutil/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/strutil/strutil.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/.gitignore
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/GNUmakefile
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/README.md
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/doc.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ifaddr.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ifattr.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ipaddr.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ipv6addr.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/rfc.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_android.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_default.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/sockaddr.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/unixsock.go
 create mode 100644 vendor/github.com/hashicorp/go-uuid/.travis.yml
 create mode 100644 vendor/github.com/hashicorp/go-uuid/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-uuid/README.md
 create mode 100644 vendor/github.com/hashicorp/go-uuid/uuid.go
 create mode 100644 vendor/github.com/hashicorp/hcl/.gitignore
 create mode 100644 vendor/github.com/hashicorp/hcl/.travis.yml
 create mode 100644 vendor/github.com/hashicorp/hcl/LICENSE
 create mode 100644 vendor/github.com/hashicorp/hcl/Makefile
 create mode 100644 vendor/github.com/hashicorp/hcl/README.md
 create mode 100644 vendor/github.com/hashicorp/hcl/appveyor.yml
 create mode 100644 vendor/github.com/hashicorp/hcl/decoder.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/error.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/position.go
 create mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/token.go
 create mode 100644 vendor/github.com/hashicorp/hcl/json/parser/flatten.go
 create mode 100644 vendor/github.com/hashicorp/hcl/json/parser/parser.go
 create mode 100644 vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
 create mode 100644 vendor/github.com/hashicorp/hcl/json/token/position.go
 create mode 100644 vendor/github.com/hashicorp/hcl/json/token/token.go
 create mode 100644 vendor/github.com/hashicorp/hcl/lex.go
 create mode 100644 vendor/github.com/hashicorp/hcl/parse.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/LICENSE
 create mode 100644 vendor/github.com/hashicorp/vault/api/README.md
 create mode 100644 vendor/github.com/hashicorp/vault/api/auth.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/auth_token.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/client.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/help.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/kv.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/kv_v1.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/kv_v2.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/lifetime_watcher.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/logical.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/output_policy.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/output_string.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/plugin_helpers.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/request.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/response.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/secret.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/ssh.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/ssh_agent.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/sys.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/sys_audit.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/sys_auth.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/sys_capabilities.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/sys_config_cors.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/sys_generate_root.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/sys_hastatus.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/sys_health.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/sys_init.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/sys_leader.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/sys_leases.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/sys_mfa.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/sys_monitor.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/sys_mounts.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/sys_plugins.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/sys_policy.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/sys_raft.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/sys_rekey.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/sys_rotate.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/sys_seal.go
 create mode 100644 vendor/github.com/hashicorp/vault/api/sys_stepdown.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/LICENSE
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/compressutil/compress.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/agent.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/consts.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/deprecation_status.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/error.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/plugin_types.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/replication.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/token_consts.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/cryptoutil/cryptoutil.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/errutil/error.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/hclutil/hcl.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/jsonutil/json.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/license/feature.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/locksutil/locks.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/logging/logging.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pathmanager/pathmanager.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/env.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.pb.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.proto
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing_grpc.pb.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/run_config.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/runner.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/tls.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/strutil/strutil.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/wrapping/wrapinfo.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/audit.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/auth.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/connection.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/controlgroup.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/error.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/identity.pb.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/identity.proto
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/lease.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/logical.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/logical_storage.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/managed_key.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/plugin.pb.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/plugin.proto
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/request.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/response.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/response_util.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/secret.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/storage.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/storage_inmem.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/storage_view.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/system_view.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/testing.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/token.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/translate_response.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/version.pb.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/version.proto
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/version_grpc.pb.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/cache.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/encoding.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/entry.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/error.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem_ha.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/latency.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/physical.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/physical_access.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/physical_view.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/testing.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/transactions.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/version/cgo.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/version/version.go
 create mode 100644 vendor/github.com/hashicorp/vault/sdk/version/version_base.go
 create mode 100644 vendor/github.com/hashicorp/yamux/.gitignore
 create mode 100644 vendor/github.com/hashicorp/yamux/LICENSE
 create mode 100644 vendor/github.com/hashicorp/yamux/README.md
 create mode 100644 vendor/github.com/hashicorp/yamux/addr.go
 create mode 100644 vendor/github.com/hashicorp/yamux/const.go
 create mode 100644 vendor/github.com/hashicorp/yamux/mux.go
 create mode 100644 vendor/github.com/hashicorp/yamux/session.go
 create mode 100644 vendor/github.com/hashicorp/yamux/spec.md
 create mode 100644 vendor/github.com/hashicorp/yamux/stream.go
 create mode 100644 vendor/github.com/hashicorp/yamux/util.go
 create mode 100644 vendor/github.com/jellydator/ttlcache/v2/CHANGELOG.md
 create mode 100644 vendor/github.com/jellydator/ttlcache/v2/LICENSE
 create mode 100644 vendor/github.com/jellydator/ttlcache/v2/Readme.md
 create mode 100644 vendor/github.com/jellydator/ttlcache/v2/cache.go
 create mode 100644 vendor/github.com/jellydator/ttlcache/v2/evictionreason_enumer.go
 create mode 100644 vendor/github.com/jellydator/ttlcache/v2/item.go
 create mode 100644 vendor/github.com/jellydator/ttlcache/v2/metrics.go
 create mode 100644 vendor/github.com/jellydator/ttlcache/v2/priority_queue.go
 create mode 100644 vendor/github.com/mattn/go-colorable/LICENSE
 create mode 100644 vendor/github.com/mattn/go-colorable/README.md
 create mode 100644 vendor/github.com/mattn/go-colorable/colorable_appengine.go
 create mode 100644 vendor/github.com/mattn/go-colorable/colorable_others.go
 create mode 100644 vendor/github.com/mattn/go-colorable/colorable_windows.go
 create mode 100644 vendor/github.com/mattn/go-colorable/go.test.sh
 create mode 100644 vendor/github.com/mattn/go-colorable/noncolorable.go
 create mode 100644 vendor/github.com/mattn/go-isatty/LICENSE
 create mode 100644 vendor/github.com/mattn/go-isatty/README.md
 create mode 100644 vendor/github.com/mattn/go-isatty/doc.go
 create mode 100644 vendor/github.com/mattn/go-isatty/go.test.sh
 create mode 100644 vendor/github.com/mattn/go-isatty/isatty_bsd.go
 create mode 100644 vendor/github.com/mattn/go-isatty/isatty_others.go
 create mode 100644 vendor/github.com/mattn/go-isatty/isatty_plan9.go
 create mode 100644 vendor/github.com/mattn/go-isatty/isatty_solaris.go
 create mode 100644 vendor/github.com/mattn/go-isatty/isatty_tcgets.go
 create mode 100644 vendor/github.com/mattn/go-isatty/isatty_windows.go
 create mode 100644 vendor/github.com/mitchellh/go-testing-interface/.travis.yml
 create mode 100644 vendor/github.com/mitchellh/go-testing-interface/LICENSE
 create mode 100644 vendor/github.com/mitchellh/go-testing-interface/README.md
 create mode 100644 vendor/github.com/mitchellh/go-testing-interface/testing.go
 create mode 100644 vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
 create mode 100644 vendor/github.com/mitchellh/mapstructure/LICENSE
 create mode 100644 vendor/github.com/mitchellh/mapstructure/README.md
 create mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks.go
 create mode 100644 vendor/github.com/mitchellh/mapstructure/error.go
 create mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure.go
 create mode 100644 vendor/github.com/oklog/run/.gitignore
 create mode 100644 vendor/github.com/oklog/run/LICENSE
 create mode 100644 vendor/github.com/oklog/run/README.md
 create mode 100644 vendor/github.com/oklog/run/actors.go
 create mode 100644 vendor/github.com/oklog/run/group.go
 create mode 100644 vendor/github.com/pierrec/lz4/.gitignore
 create mode 100644 vendor/github.com/pierrec/lz4/.travis.yml
 create mode 100644 vendor/github.com/pierrec/lz4/LICENSE
 create mode 100644 vendor/github.com/pierrec/lz4/README.md
 create mode 100644 vendor/github.com/pierrec/lz4/block.go
 create mode 100644 vendor/github.com/pierrec/lz4/debug.go
 create mode 100644 vendor/github.com/pierrec/lz4/debug_stub.go
 create mode 100644 vendor/github.com/pierrec/lz4/decode_amd64.go
 create mode 100644 vendor/github.com/pierrec/lz4/decode_amd64.s
 create mode 100644 vendor/github.com/pierrec/lz4/decode_other.go
 create mode 100644 vendor/github.com/pierrec/lz4/errors.go
 create mode 100644 vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go
 create mode 100644 vendor/github.com/pierrec/lz4/lz4.go
 create mode 100644 vendor/github.com/pierrec/lz4/lz4_go1.10.go
 create mode 100644 vendor/github.com/pierrec/lz4/lz4_notgo1.10.go
 create mode 100644 vendor/github.com/pierrec/lz4/reader.go
 create mode 100644 vendor/github.com/pierrec/lz4/reader_legacy.go
 create mode 100644 vendor/github.com/pierrec/lz4/writer.go
 create mode 100644 vendor/github.com/pierrec/lz4/writer_legacy.go
 create mode 100644 vendor/github.com/ryanuber/go-glob/.travis.yml
 create mode 100644 vendor/github.com/ryanuber/go-glob/LICENSE
 create mode 100644 vendor/github.com/ryanuber/go-glob/README.md
 create mode 100644 vendor/github.com/ryanuber/go-glob/glob.go
 create mode 100644 vendor/github.com/sigstore/sigstore/pkg/cryptoutils/sans.go
 create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/client.go
 create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/doc.go
 create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/signer.go
 create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/README.md
 create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/client.go
 create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/doc.go
 create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/signer.go
 create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/kms/doc.go
 create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/client.go
 create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/doc.go
 create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/signer.go
 create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/client.go
 create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/doc.go
 create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/signer.go
 create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/kms/kms.go
 create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b.go
 create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go
 create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s
 create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go
 create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
 create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_generic.go
 create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_ref.go
 create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2x.go
 create mode 100644 vendor/golang.org/x/crypto/blake2b/register.go
 create mode 100644 vendor/golang.org/x/crypto/cryptobyte/asn1.go
 create mode 100644 vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
 create mode 100644 vendor/golang.org/x/crypto/cryptobyte/builder.go
 create mode 100644 vendor/golang.org/x/crypto/cryptobyte/string.go
 create mode 100644 vendor/golang.org/x/sync/singleflight/singleflight.go
 create mode 100644 vendor/google.golang.org/api/googleapi/googleapi.go
 create mode 100644 vendor/google.golang.org/api/googleapi/types.go
 create mode 100644 vendor/google.golang.org/api/internal/conn_pool.go
 create mode 100644 vendor/google.golang.org/api/internal/creds.go
 create mode 100644 vendor/google.golang.org/api/internal/impersonate/impersonate.go
 create mode 100644 vendor/google.golang.org/api/internal/settings.go
 create mode 100644 vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE
 create mode 100644 vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA
 create mode 100644 vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go
 create mode 100644 vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go
 create mode 100644 vendor/google.golang.org/api/internal/version.go
 create mode 100644 vendor/google.golang.org/api/iterator/iterator.go
 create mode 100644 vendor/google.golang.org/api/option/internaloption/internaloption.go
 create mode 100644 vendor/google.golang.org/api/option/option.go
 create mode 100644 vendor/google.golang.org/api/transport/cert/default_cert.go
 create mode 100644 vendor/google.golang.org/api/transport/cert/enterprise_cert.go
 create mode 100644 vendor/google.golang.org/api/transport/cert/secureconnect_cert.go
 create mode 100644 vendor/google.golang.org/api/transport/grpc/dial.go
 create mode 100644 vendor/google.golang.org/api/transport/grpc/dial_appengine.go
 create mode 100644 vendor/google.golang.org/api/transport/grpc/dial_socketopt.go
 create mode 100644 vendor/google.golang.org/api/transport/grpc/pool.go
 create mode 100644 vendor/google.golang.org/api/transport/internal/dca/dca.go
 create mode 100644 vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go
 create mode 100644 vendor/google.golang.org/appengine/internal/socket/socket_service.proto
 create mode 100644 vendor/google.golang.org/appengine/socket/doc.go
 create mode 100644 vendor/google.golang.org/appengine/socket/socket_classic.go
 create mode 100644 vendor/google.golang.org/appengine/socket/socket_vm.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/kms/v1/alias.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/location/locations.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/alts.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/common.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/utils.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/google/google.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/google/xds.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/oauth/oauth.go
 create mode 100644 vendor/google.golang.org/grpc/health/client.go
 create mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
 create mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
 create mode 100644 vendor/google.golang.org/grpc/health/logging.go
 create mode 100644 vendor/google.golang.org/grpc/health/server.go
 create mode 100644 vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go
 create mode 100644 vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go
 create mode 100644 vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go
 create mode 100644 vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go
 create mode 100644 vendor/google.golang.org/grpc/reflection/README.md
 create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go
 create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto
 create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go
 create mode 100644 vendor/google.golang.org/grpc/reflection/serverreflection.go

diff --git a/go.mod b/go.mod
index 9ec90db9c97..9c239ec4651 100644
--- a/go.mod
+++ b/go.mod
@@ -18,13 +18,13 @@ require (
 	github.com/mitchellh/go-homedir v1.1.0
 	github.com/opencontainers/image-spec v1.1.0-rc2
 	github.com/pkg/errors v0.9.1
-	github.com/sigstore/sigstore v1.4.6
+	github.com/sigstore/sigstore v1.5.0
 	github.com/spiffe/go-spiffe/v2 v2.1.1
 	github.com/spiffe/spire-api-sdk v1.5.0
 	github.com/tektoncd/plumbing v0.0.0-20220817140952-3da8ce01aeeb
 	go.opencensus.io v0.24.0
 	go.uber.org/zap v1.24.0
-	golang.org/x/oauth2 v0.2.0
+	golang.org/x/oauth2 v0.3.0
 	gomodules.xyz/jsonpatch/v2 v2.2.0
 	gopkg.in/square/go-jose.v2 v2.6.0
 	k8s.io/api v0.25.4
@@ -62,20 +62,55 @@ require (
 )
 
 require (
-	cloud.google.com/go/compute/metadata v0.2.1 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.8 // indirect
+	cloud.google.com/go/compute/metadata v0.2.2 // indirect
+	cloud.google.com/go/iam v0.8.0 // indirect
+	cloud.google.com/go/kms v1.7.0 // indirect
+	github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
+	github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
+	github.com/armon/go-metrics v0.4.1 // indirect
+	github.com/armon/go-radix v1.0.0 // indirect
+	github.com/aws/aws-sdk-go-v2/service/kms v1.19.2 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9 // indirect
+	github.com/cenkalti/backoff/v3 v3.2.2 // indirect
 	github.com/emicklei/go-restful/v3 v3.9.0 // indirect
+	github.com/fatih/color v1.13.0 // indirect
+	github.com/golang/snappy v0.0.4 // indirect
 	github.com/google/gnostic v0.6.9 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect
+	github.com/googleapis/gax-go/v2 v2.7.0 // indirect
+	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
+	github.com/hashicorp/go-hclog v1.3.1 // indirect
+	github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
+	github.com/hashicorp/go-plugin v1.4.6 // indirect
+	github.com/hashicorp/go-retryablehttp v0.7.1 // indirect
+	github.com/hashicorp/go-rootcerts v1.0.2 // indirect
+	github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 // indirect
+	github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 // indirect
+	github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
+	github.com/hashicorp/go-sockaddr v1.0.2 // indirect
+	github.com/hashicorp/go-uuid v1.0.3 // indirect
+	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/hashicorp/vault/api v1.8.2 // indirect
+	github.com/hashicorp/vault/sdk v0.6.1 // indirect
+	github.com/hashicorp/yamux v0.1.1 // indirect
+	github.com/jellydator/ttlcache/v2 v2.11.1 // indirect
 	github.com/kr/pretty v0.3.0 // indirect
+	github.com/mattn/go-colorable v0.1.13 // indirect
+	github.com/mattn/go-isatty v0.0.16 // indirect
+	github.com/mitchellh/go-testing-interface v1.14.1 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/oklog/run v1.1.0 // indirect
+	github.com/pierrec/lz4 v2.6.1+incompatible // indirect
 	github.com/rogpeppe/go-internal v1.8.0 // indirect
+	github.com/ryanuber/go-glob v1.0.0 // indirect
 	github.com/theupdateframework/go-tuf v0.5.2-0.20220930112810-3890c1e7ace4 // indirect
 	github.com/zeebo/errs v1.2.2 // indirect
 	go.uber.org/goleak v1.2.0 // indirect
 )
 
 require (
-	cloud.google.com/go/compute v1.12.1 // indirect
+	cloud.google.com/go/compute v1.13.0 // indirect
 	contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect
 	contrib.go.opencensus.io/exporter/prometheus v0.4.0 // indirect
 	github.com/Azure/azure-sdk-for-go v67.1.0+incompatible // indirect
@@ -87,19 +122,19 @@ require (
 	github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
 	github.com/Azure/go-autorest/logger v0.2.1 // indirect
 	github.com/Azure/go-autorest/tracing v0.6.0 // indirect
-	github.com/aws/aws-sdk-go-v2 v1.17.1 // indirect
-	github.com/aws/aws-sdk-go-v2/config v1.18.3 // indirect
-	github.com/aws/aws-sdk-go-v2/credentials v1.13.3 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/ini v1.3.26 // indirect
+	github.com/aws/aws-sdk-go-v2 v1.17.2 // indirect
+	github.com/aws/aws-sdk-go-v2/config v1.18.4 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.13.4 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27 // indirect
 	github.com/aws/aws-sdk-go-v2/service/ecr v1.17.18 // indirect
 	github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.17 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.19 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sso v1.11.25 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sts v1.17.5 // indirect
-	github.com/aws/smithy-go v1.13.4 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.11.26 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 // indirect
+	github.com/aws/smithy-go v1.13.5 // indirect
 	github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20221004211355-a250ad2ca1e3 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/blang/semver/v4 v4.0.0 // indirect
@@ -163,18 +198,18 @@ require (
 	go.uber.org/atomic v1.10.0 // indirect
 	go.uber.org/automaxprocs v1.4.0 // indirect
 	go.uber.org/multierr v1.8.0 // indirect
-	golang.org/x/crypto v0.3.0
+	golang.org/x/crypto v0.4.0
 	golang.org/x/mod v0.6.0 // indirect
-	golang.org/x/net v0.2.0 // indirect
+	golang.org/x/net v0.3.0 // indirect
 	golang.org/x/sync v0.1.0 // indirect
-	golang.org/x/sys v0.2.0 // indirect
-	golang.org/x/term v0.2.0 // indirect
-	golang.org/x/text v0.4.0 // indirect
+	golang.org/x/sys v0.3.0 // indirect
+	golang.org/x/term v0.3.0 // indirect
+	golang.org/x/text v0.5.0 // indirect
 	golang.org/x/time v0.2.0 // indirect
 	golang.org/x/tools v0.1.12 // indirect
-	google.golang.org/api v0.103.0 // indirect
+	google.golang.org/api v0.104.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20221111202108-142d8a6fa32e // indirect
+	google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6 // indirect
 	google.golang.org/grpc v1.51.0
 	google.golang.org/protobuf v1.28.1
 	gopkg.in/inf.v0 v0.9.1 // indirect
diff --git a/go.sum b/go.sum
index 69675080bf5..b594e9ca636 100644
--- a/go.sum
+++ b/go.sum
@@ -20,6 +20,7 @@ cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECH
 cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
 cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
 cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -28,11 +29,19 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
 cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0=
 cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
+cloud.google.com/go/compute v1.13.0 h1:AYrLkB8NPdDRslNp4Jxmzrhdr03fUAIDbiGFjLWowoU=
+cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE=
 cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48=
 cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
+cloud.google.com/go/compute/metadata v0.2.2 h1:aWKAjYaBaOSrpKl57+jnS/3fJRQnxL7TvR/u1VVbt6k=
+cloud.google.com/go/compute/metadata v0.2.2/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
 cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk=
+cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE=
+cloud.google.com/go/kms v1.7.0 h1:8FCf8C7qfOuSr6YzOQ4RGjJvswSRFeOpur3nHOlJbio=
+cloud.google.com/go/kms v1.7.0/go.mod h1:k2UdVoNIHLJi/Rnng6dN0vlq7lS3jHSDiZasft+gmYE=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
 cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
 cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -77,6 +86,10 @@ github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935
 github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
 github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw=
 github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
+github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
+github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
+github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=
+github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
 github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
 github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
@@ -84,6 +97,7 @@ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUM
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
 github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
 github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
 github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
@@ -129,7 +143,11 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
+github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
@@ -137,24 +155,38 @@ github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZo
 github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k=
 github.com/aws/aws-sdk-go-v2 v1.17.1 h1:02c72fDJr87N8RAC2s3Qu0YuvMRZKNZJ9F+lAehCazk=
 github.com/aws/aws-sdk-go-v2 v1.17.1/go.mod h1:JLnGeGONAyi2lWXI1p0PCIOIy333JMVK1U7Hf0aRFLw=
+github.com/aws/aws-sdk-go-v2 v1.17.2 h1:r0yRZInwiPBNpQ4aDy/Ssh3ROWsGtKDwar2JS8Lm+N8=
+github.com/aws/aws-sdk-go-v2 v1.17.2/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
 github.com/aws/aws-sdk-go-v2/config v1.17.8/go.mod h1:UkCI3kb0sCdvtjiXYiU4Zx5h07BOpgBTtkPu/49r+kA=
 github.com/aws/aws-sdk-go-v2/config v1.18.3 h1:3kfBKcX3votFX84dm00U8RGA1sCCh3eRMOGzg5dCWfU=
 github.com/aws/aws-sdk-go-v2/config v1.18.3/go.mod h1:BYdrbeCse3ZnOD5+2/VE/nATOK8fEUpBtmPMdKSyhMU=
+github.com/aws/aws-sdk-go-v2/config v1.18.4 h1:VZKhr3uAADXHStS/Gf9xSYVmmaluTUfkc0dcbPiDsKE=
+github.com/aws/aws-sdk-go-v2/config v1.18.4/go.mod h1:EZxMPLSdGAZ3eAmkqXfYbRppZJTzFTkv8VyEzJhKko4=
 github.com/aws/aws-sdk-go-v2/credentials v1.12.21/go.mod h1:O+4XyAt4e+oBAoIwNUYkRg3CVMscaIJdmZBOcPgJ8D8=
 github.com/aws/aws-sdk-go-v2/credentials v1.13.3 h1:ur+FHdp4NbVIv/49bUjBW+FE7e57HOo03ELodttmagk=
 github.com/aws/aws-sdk-go-v2/credentials v1.13.3/go.mod h1:/rOMmqYBcFfNbRPU0iN9IgGqD5+V2yp3iWNmIlz0wI4=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.4 h1:nEbHIyJy7mCvQ/kzGG7VWHSBpRB4H6sJy3bWierWUtg=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.4/go.mod h1:/Cj5w9LRsNTLSwexsohwDME32OzJ6U81Zs33zr2ZWOM=
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17/go.mod h1:yIkQcCDYNsZfXpd5UX2Cy+sWA1jPgIhGTw9cOBzfVnQ=
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19 h1:E3PXZSI3F2bzyj6XxUXdTIfvp425HHhwKsFvmzBwHgs=
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19/go.mod h1:VihW95zQpeKQWVPGkwT+2+WJNQV8UXFfMTWdU6VErL8=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20 h1:tpNOglTZ8kg9T38NpcGBxudqfUAwUzyUnLQ4XSd0CHE=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20/go.mod h1:d9xFpWd3qYwdIXM0fvu7deD08vvdRXyc/ueV+0SqaWE=
 github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23/go.mod h1:2DFxAQ9pfIRy0imBCJv+vZ2X6RKxves6fbnEuSry6b4=
 github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25 h1:nBO/RFxeq/IS5G9Of+ZrgucRciie2qpLy++3UGZ+q2E=
 github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25/go.mod h1:Zb29PYkf42vVYQY6pvSyJCJcFHlPIiY+YKdPtwnvMkY=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26 h1:5WU31cY7m0tG+AiaXuXGoMzo2GBQ1IixtWa8Yywsgco=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26/go.mod h1:2E0LdbJW6lbeU4uxjum99GZzI0ZjDpAb0CoSCM0oeEY=
 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17/go.mod h1:pRwaTYCJemADaqCbUAxltMoHKata7hmB5PjEXeu0kfg=
 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19 h1:oRHDrwCTVT8ZXi4sr9Ld+EXk7N/KGssOr2ygNeojEhw=
 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19/go.mod h1:6Q0546uHDp421okhmmGfbxzq2hBqbXFNpi4k+Q1JnQA=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20 h1:WW0qSzDWoiWU2FS5DbKpxGilFVlCEJPwx4YtjdfI0Jw=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20/go.mod h1:/+6lSiby8TBFpTVXZgKiN/rCfkYXEGvhlM4zCgPpt7w=
 github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24/go.mod h1:jULHjqqjDlbyTa7pfM7WICATnOv+iOhjletM3N0Xbu8=
 github.com/aws/aws-sdk-go-v2/internal/ini v1.3.26 h1:Mza+vlnZr+fPKFKRq/lKGVvM6B/8ZZmNdEopOwSQLms=
 github.com/aws/aws-sdk-go-v2/internal/ini v1.3.26/go.mod h1:Y2OJ+P+MC1u1VKnavT+PshiEuGPyh/7DqxoDNij4/bg=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27 h1:N2eKFw2S+JWRCtTt0IhIX7uoGGQciD4p6ba+SJv4WEU=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27/go.mod h1:RdwFVc7PBYWY33fa2+8T1mSqQ7ZEK4ILpM0wfioDC3w=
 github.com/aws/aws-sdk-go-v2/service/ecr v1.17.18 h1:uiF/RI+Up8H2xdgT2GWa20YzxiKEalHieqNjm6HC3Xk=
 github.com/aws/aws-sdk-go-v2/service/ecr v1.17.18/go.mod h1:DQtDYmexqR+z+B6HBCvY7zK/tuXKv6Zy/IwOXOK3eow=
 github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.17 h1:bcQy5/dcJO8VQD+p0tDoIYdgEC3ch9f1/BNRES7XMug=
@@ -162,18 +194,30 @@ github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.17/go.mod h1:r1Vuka0kyzqN0s
 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17/go.mod h1:4nYOrY41Lrbk2170/BGkcJKBhws9Pfn8MG3aGqjjeFI=
 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.19 h1:GE25AWCdNUPh9AOJzI9KIJnja7IwUc1WyUqz/JTyJ/I=
 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.19/go.mod h1:02CP6iuYP+IVnBX5HULVdSAku/85eHB2Y9EsFhrkEwU=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20 h1:jlgyHbkZQAgAc7VIxJDmtouH8eNjOk2REVAQfVhdaiQ=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20/go.mod h1:Xs52xaLBqDEKRcAfX/hgjmD3YQ7c/W+BEyfamlO/W2E=
+github.com/aws/aws-sdk-go-v2/service/kms v1.19.2 h1:pgOVfu7E6zBddKGks4TvL4YuFsL/oTpiWDIzs4WPLjY=
+github.com/aws/aws-sdk-go-v2/service/kms v1.19.2/go.mod h1:XH60PhgtbXDXFBzJ2auE6bpIELxAYTnoVFFwPtG8JwY=
 github.com/aws/aws-sdk-go-v2/service/sso v1.11.23/go.mod h1:/w0eg9IhFGjGyyncHIQrXtU8wvNsTJOP0R6PPj0wf80=
 github.com/aws/aws-sdk-go-v2/service/sso v1.11.25 h1:GFZitO48N/7EsFDt8fMa5iYdmWqkUDDB3Eje6z3kbG0=
 github.com/aws/aws-sdk-go-v2/service/sso v1.11.25/go.mod h1:IARHuzTXmj1C0KS35vboR0FeJ89OkEy1M9mWbK2ifCI=
+github.com/aws/aws-sdk-go-v2/service/sso v1.11.26 h1:ActQgdTNQej/RuUJjB9uxYVLDOvRGtUreXF8L3c8wyg=
+github.com/aws/aws-sdk-go-v2/service/sso v1.11.26/go.mod h1:uB9tV79ULEZUXc6Ob18A46KSQ0JDlrplPni9XW6Ot60=
 github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.6/go.mod h1:csZuQY65DAdFBt1oIjO5hhBR49kQqop4+lcuCjf2arA=
 github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.8 h1:jcw6kKZrtNfBPJkaHrscDOZoe5gvi9wjudnxvozYFJo=
 github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.8/go.mod h1:er2JHN+kBY6FcMfcBBKNGCT3CarImmdFzishsqBmSRI=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9 h1:wihKuqYUlA2T/Rx+yu2s6NDAns8B9DgnRooB1PVhY+Q=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9/go.mod h1:2E/3D/mB8/r2J7nK42daoKP/ooCwbf0q1PznNc+DZTU=
 github.com/aws/aws-sdk-go-v2/service/sts v1.16.19/go.mod h1:h4J3oPZQbxLhzGnk+j9dfYHi5qIOVJ5kczZd658/ydM=
 github.com/aws/aws-sdk-go-v2/service/sts v1.17.5 h1:60SJ4lhvn///8ygCzYy2l53bFW/Q15bVfyjyAWo6zuw=
 github.com/aws/aws-sdk-go-v2/service/sts v1.17.5/go.mod h1:bXcN3koeVYiJcdDU89n3kCYILob7Y34AeLopUbZgLT4=
+github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 h1:VQFOLQVL3BrKM/NLO/7FiS4vcp5bqK0mGMyk09xLoAY=
+github.com/aws/aws-sdk-go-v2/service/sts v1.17.6/go.mod h1:Az3OXXYGyfNwQNsK/31L4R75qFYnO641RZGAoV3uH1c=
 github.com/aws/smithy-go v1.13.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
 github.com/aws/smithy-go v1.13.4 h1:/RN2z1txIJWeXeOkzX+Hk/4Uuvv7dWtCjbmVJcrskyk=
 github.com/aws/smithy-go v1.13.4/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
+github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
 github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20221004211355-a250ad2ca1e3 h1:Ted/bR1N6ltMrASdwRhX1BrGYSFg3aeGMlK8GlgkGh4=
 github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20221004211355-a250ad2ca1e3/go.mod h1:m06KtrZgOloUaePAQMv+Ha8kRmTnKdozTHZrweepIrw=
 github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@@ -201,6 +245,8 @@ github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx2
 github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
 github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
 github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M=
+github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk=
 github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@@ -218,6 +264,8 @@ github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE
 github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
 github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
 github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cloudevents/sdk-go/v2 v2.12.0 h1:p1k+ysVOZtNiXfijnwB3WqZNA3y2cGOiKQygWkUHCEI=
 github.com/cloudevents/sdk-go/v2 v2.12.0/go.mod h1:xDmKfzNjM8gBvjaF8ijFjM1VYOVUEeUfapHMUX1T5To=
@@ -410,6 +458,8 @@ github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yi
 github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg=
 github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
+github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
 github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
 github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
@@ -524,6 +574,7 @@ github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx
 github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
 github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -580,8 +631,12 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
 github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs=
+github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ=
+github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
 github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
@@ -609,19 +664,45 @@ github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FK
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-hclog v1.3.1 h1:vDwF1DFNZhntP4DAjuTpOw3uEgMUpXh1pB5fW9DqHpo=
+github.com/hashicorp/go-hclog v1.3.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
 github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
 github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-plugin v1.4.6 h1:MDV3UrKQBM3du3G7MApDGvOsMYy3JQJ4exhSoKBAeVA=
+github.com/hashicorp/go-plugin v1.4.6/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ=
+github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
 github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 h1:p4AKXPPS24tO8Wc8i1gLvSKdmkiSY5xuju57czJ/IJQ=
+github.com/hashicorp/go-secure-stdlib/mlock v0.1.2/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
 github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
+github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
 github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
 github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
+github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
 github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
 github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
@@ -630,11 +711,18 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
 github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
 github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
 github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hashicorp/vault/api v1.8.2 h1:C7OL9YtOtwQbTKI9ogB0A1wffRbCN+rH/LLCHO3d8HM=
+github.com/hashicorp/vault/api v1.8.2/go.mod h1:ML8aYzBIhY5m1MD1B2Q0JV89cC85YVH4t5kBaZiyVaE=
+github.com/hashicorp/vault/sdk v0.6.1 h1:sjZC1z4j5Rh2GXYbkxn5BLK05S1p7+MhW4AgdUmgRUA=
+github.com/hashicorp/vault/sdk v0.6.1/go.mod h1:Ck4JuAC6usTphfrrRJCRH+7/N7O2ozZzkm/fzQFt4uM=
+github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE=
+github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
 github.com/honeycombio/beeline-go v1.10.0 h1:cUDe555oqvw8oD76BQJ8alk7FP0JZ/M/zXpNvOEDLDc=
 github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -656,6 +744,8 @@ github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/U
 github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
 github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc=
 github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
+github.com/jellydator/ttlcache/v2 v2.11.1 h1:AZGME43Eh2Vv3giG6GeqeLeFXxwxn1/qHItqWZl6U64=
+github.com/jellydator/ttlcache/v2 v2.11.1/go.mod h1:RtE5Snf0/57e+2cLWFYWCCsLas2Hy3c5Z4n14XmSvTI=
 github.com/jenkins-x/go-scm v1.11.35 h1:DcxonfFMf8jqqSWZHCsj40Cp1++hTAmlFtIf7IEl3vk=
 github.com/jenkins-x/go-scm v1.11.35/go.mod h1:JyL6p5dNNeJZt41yfysDTBe0Q51cLa5MVK0du7JnXt0=
 github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
@@ -673,6 +763,7 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF
 github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
@@ -727,8 +818,16 @@ github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHef
 github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A=
 github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
 github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
 github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
 github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@@ -745,11 +844,16 @@ github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
+github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
+github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
 github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
 github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
 github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
 github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
@@ -779,6 +883,8 @@ github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
 github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
 github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
+github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
 github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
 github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -831,11 +937,13 @@ github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6
 github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
 github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
 github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
 github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
 github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
 github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -853,6 +961,7 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
 github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
@@ -870,6 +979,7 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q
 github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
 github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
@@ -906,6 +1016,9 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
+github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
 github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
@@ -921,6 +1034,8 @@ github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f/go.mod h1:AuYgA5K
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/sigstore/sigstore v1.4.6 h1:2F1LPnQf6h1lRDCyNMoBE0WCPsA+IU5kAEAbGxG8S+U=
 github.com/sigstore/sigstore v1.4.6/go.mod h1:jGHEfVTFgpfDpBz7pSY4X+Sd+g36qdAUxGufNk47k7g=
+github.com/sigstore/sigstore v1.5.0 h1:NqstQ6SwwhQsp6Ll0wgk/d9g5MlfmEppo14aquUjJ/8=
+github.com/sigstore/sigstore v1.5.0/go.mod h1:fRAaZ9xXh7ZQ0GJqZdpmNJ3pemuHBu2PgIAngmzIFSI=
 github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
 github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -978,6 +1093,7 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0/go.mod
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= @@ -996,6 +1112,7 @@ github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= @@ -1064,6 +1181,7 @@ go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.4.0 h1:CpDZl6aOlLhReez+8S3eEotD7Jx0Os++lemPlMULQP0= go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= @@ -1100,6 +1218,8 @@ golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8= +golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1199,6 +1319,8 @@ golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.3.0 h1:VWL6FNY2bEEmsGVKabSlHu5Irp34xmMRoqb/9lF9lxk= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1215,6 +1337,8 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.2.0 h1:GtQkldQ9m7yvzCL1V+LrYow3Khe0eJH0w7RbX/VbaIU= golang.org/x/oauth2 v0.2.0/go.mod h1:Cwn6afJ8jrQwYMxQDTpISoXmXW9I6qF6vDeuuoX3Ibs= +golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8= +golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1268,6 +1392,7 @@ golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1316,17 +1441,25 @@ golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= golang.org/x/sys v0.2.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1338,6 +1471,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1366,6 +1501,7 @@ golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDq golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1403,6 +1539,7 @@ golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -1441,6 +1578,8 @@ google.golang.org/api v0.44.0/go.mod 
h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1Avk google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= google.golang.org/api v0.103.0 h1:9yuVqlu2JCvcLg9p8S3fcFLZij8EPSyvODIY1rkMizQ= google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.104.0 h1:KBfmLRqdZEbwQleFlSLnzpQJwhjpmNOk4cKQIBDZ9mg= +google.golang.org/api v0.104.0/go.mod h1:JCspTXJbBxa5ySXw4UgUqVer7DfVxbvc/CTUFqAED5U= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1500,6 +1639,8 @@ google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxH google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20221111202108-142d8a6fa32e h1:azcyH5lGzGy7pkLCbhPe0KkKxsM7c6UA/FZIXImKE7M= google.golang.org/genproto v0.0.0-20221111202108-142d8a6fa32e/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6 h1:AGXp12e/9rItf6/4QymU7WsAUwCf+ICW75cuR91nJIc= +google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6/go.mod h1:1dOng4TWOomJrDGhpXjfCD35wQC6jnC7HpRmOFRqEV0= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= diff --git a/pkg/trustedresources/verifier/verifier.go b/pkg/trustedresources/verifier/verifier.go index 7a6b849f2bc..a229770976f 100644 --- a/pkg/trustedresources/verifier/verifier.go +++ b/pkg/trustedresources/verifier/verifier.go @@ -26,6 +26,10 @@ import ( "github.com/sigstore/sigstore/pkg/cryptoutils" "github.com/sigstore/sigstore/pkg/signature" + _ "github.com/sigstore/sigstore/pkg/signature/kms/aws" + _ "github.com/sigstore/sigstore/pkg/signature/kms/azure" + _ "github.com/sigstore/sigstore/pkg/signature/kms/gcp" + _ "github.com/sigstore/sigstore/pkg/signature/kms/hashivault" "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" v1 "k8s.io/api/core/v1" diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go index 5ac4a843e11..efedadbea25 100644 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "1.12.1" +const Version = "1.13.0" diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 8631b6d6d2c..6e3ee8d6ab1 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,12 @@ # Changes +## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01) + + +### Bug Fixes + +* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430) + ## [0.1.0] (2022-10-26) Initial release of metadata being it's own module. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 50538b1d343..d4aad9bf395 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -70,6 +70,7 @@ func newDefaultHTTPClient() *http.Client { Timeout: 2 * time.Second, KeepAlive: 30 * time.Second, }).Dial, + IdleConnTimeout: 60 * time.Second, }, Timeout: 5 * time.Second, } diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md new file mode 100644 index 00000000000..ced217827b0 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -0,0 +1,62 @@ +# Changes + +## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.7.0...iam/v0.8.0) (2022-12-05) + + +### Features + +* **iam:** Start generating and refresh some libraries ([#7089](https://github.com/googleapis/google-cloud-go/issues/7089)) ([a9045ff](https://github.com/googleapis/google-cloud-go/commit/a9045ff191a711089c37f1d94a63522d9939ce38)) + +## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.6.0...iam/v0.7.0) (2022-11-03) + + +### Features + +* **iam:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad)) + +## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.5.0...iam/v0.6.0) (2022-10-25) + + +### Features + +* **iam:** start generating stubs dir ([de2d180](https://github.com/googleapis/google-cloud-go/commit/de2d18066dc613b72f6f8db93ca60146dabcfdcc)) + +## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.4.0...iam/v0.5.0) (2022-09-28) + + +### Features + +* **iam:** remove ListApplicablePolicies ([52dddd1](https://github.com/googleapis/google-cloud-go/commit/52dddd1ed89fbe77e1859311c3b993a77a82bfc7)) + +## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.3.0...iam/v0.4.0) (2022-09-06) + + +### Features + +* **iam:** start generating apiv2 ([#6605](https://github.com/googleapis/google-cloud-go/issues/6605)) ([a6004e7](https://github.com/googleapis/google-cloud-go/commit/a6004e762f782869cd85688937475744f7b17e50)) + +## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.2.0...iam/v0.3.0) (2022-02-23) + + +### Features + +* **iam:** set versionClient to module version 
([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9)) + +## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.1.1...iam/v0.2.0) (2022-02-14) + + +### Features + +* **iam:** add file for tracking version ([17b36ea](https://github.com/googleapis/google-cloud-go/commit/17b36ead42a96b1a01105122074e65164357519e)) + +### [0.1.1](https://www.github.com/googleapis/google-cloud-go/compare/iam/v0.1.0...iam/v0.1.1) (2022-01-14) + + +### Bug Fixes + +* **iam:** run formatter ([#5277](https://www.github.com/googleapis/google-cloud-go/issues/5277)) ([8682e4e](https://www.github.com/googleapis/google-cloud-go/commit/8682e4ed57a4428a659fbc225f56c91767e2a4a9)) + +## v0.1.0 + +This is the first tag to carve out iam as its own module. See +[Add a module to a multi-module repository](https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository). diff --git a/vendor/cloud.google.com/go/iam/LICENSE b/vendor/cloud.google.com/go/iam/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/iam/README.md b/vendor/cloud.google.com/go/iam/README.md new file mode 100644 index 00000000000..0072cc9e294 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/README.md @@ -0,0 +1,40 @@ +# IAM API + +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/iam.svg)](https://pkg.go.dev/cloud.google.com/go/iam) + +Go Client Library for IAM API. + +## Install + +```bash +go get cloud.google.com/go/iam +``` + +## Stability + +The stability of this module is indicated by SemVer. + +However, a `v1+` module may have breaking changes in two scenarios: + +* Packages with `alpha` or `beta` in the import path +* The GoDoc has an explicit stability disclaimer (for example, for an experimental feature). + +## Go Version Support + +See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported) +section in the root directory's README. 
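The vendored iam package is rarely imported on its own; resource clients hand out an iam.Handle that wraps the get/set/test policy RPCs. A minimal sketch, assuming ambient GCP credentials and a hypothetical key-ring path, using the Cloud KMS client vendored in this same change:

```go
package main

import (
	"context"
	"fmt"

	"cloud.google.com/go/iam"
	kms "cloud.google.com/go/kms/apiv1"
)

func main() {
	ctx := context.Background()
	client, err := kms.NewKeyManagementClient(ctx) // uses Application Default Credentials
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// ResourceIAM returns an iam.Handle bound to the named resource; the
	// key-ring path is a made-up example.
	handle := client.ResourceIAM("projects/p/locations/global/keyRings/r")
	policy, err := handle.Policy(ctx)
	if err != nil {
		panic(err)
	}
	policy.Add("user:alice@example.com", iam.Viewer)
	if err := handle.SetPolicy(ctx, policy); err != nil {
		panic(err)
	}
	fmt.Println(policy.Members(iam.Viewer))
}
```

The read-modify-write above round-trips the policy etag, so a concurrent modification causes SetPolicy to fail rather than silently overwrite.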
+ +## Authorization + +See the [Authorization](https://github.com/googleapis/google-cloud-go#authorization) +section in the root directory's README. + +## Contributing + +Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. See +[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go new file mode 100644 index 00000000000..2793098aabc --- /dev/null +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go @@ -0,0 +1,672 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.5 +// source: google/iam/v1/iam_policy.proto + +package iampb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Request message for `SetIamPolicy` method. +type SetIamPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // REQUIRED: The resource for which the policy is being specified. + // See the operation documentation for the appropriate value for this field. + Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // REQUIRED: The complete policy to be applied to the `resource`. The size of + // the policy is limited to a few 10s of KB. An empty policy is a + // valid policy but certain Cloud Platform services (such as Projects) + // might reject them. + Policy *Policy `protobuf:"bytes,2,opt,name=policy,proto3" json:"policy,omitempty"` + // OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only + // the fields in the mask will be modified. 
If no mask is provided, the + // following default mask is used: + // + // `paths: "bindings, etag"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *SetIamPolicyRequest) Reset() { + *x = SetIamPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetIamPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetIamPolicyRequest) ProtoMessage() {} + +func (x *SetIamPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetIamPolicyRequest.ProtoReflect.Descriptor instead. +func (*SetIamPolicyRequest) Descriptor() ([]byte, []int) { + return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{0} +} + +func (x *SetIamPolicyRequest) GetResource() string { + if x != nil { + return x.Resource + } + return "" +} + +func (x *SetIamPolicyRequest) GetPolicy() *Policy { + if x != nil { + return x.Policy + } + return nil +} + +func (x *SetIamPolicyRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// Request message for `GetIamPolicy` method. +type GetIamPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // REQUIRED: The resource for which the policy is being requested. + // See the operation documentation for the appropriate value for this field. + Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // OPTIONAL: A `GetPolicyOptions` object for specifying options to + // `GetIamPolicy`. + Options *GetPolicyOptions `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` +} + +func (x *GetIamPolicyRequest) Reset() { + *x = GetIamPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetIamPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetIamPolicyRequest) ProtoMessage() {} + +func (x *GetIamPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetIamPolicyRequest.ProtoReflect.Descriptor instead. +func (*GetIamPolicyRequest) Descriptor() ([]byte, []int) { + return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{1} +} + +func (x *GetIamPolicyRequest) GetResource() string { + if x != nil { + return x.Resource + } + return "" +} + +func (x *GetIamPolicyRequest) GetOptions() *GetPolicyOptions { + if x != nil { + return x.Options + } + return nil +} + +// Request message for `TestIamPermissions` method. 
+type TestIamPermissionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // REQUIRED: The resource for which the policy detail is being requested. + // See the operation documentation for the appropriate value for this field. + Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // The set of permissions to check for the `resource`. Permissions with + // wildcards (such as '*' or 'storage.*') are not allowed. For more + // information see + // [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions). + Permissions []string `protobuf:"bytes,2,rep,name=permissions,proto3" json:"permissions,omitempty"` +} + +func (x *TestIamPermissionsRequest) Reset() { + *x = TestIamPermissionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TestIamPermissionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TestIamPermissionsRequest) ProtoMessage() {} + +func (x *TestIamPermissionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TestIamPermissionsRequest.ProtoReflect.Descriptor instead. +func (*TestIamPermissionsRequest) Descriptor() ([]byte, []int) { + return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{2} +} + +func (x *TestIamPermissionsRequest) GetResource() string { + if x != nil { + return x.Resource + } + return "" +} + +func (x *TestIamPermissionsRequest) GetPermissions() []string { + if x != nil { + return x.Permissions + } + return nil +} + +// Response message for `TestIamPermissions` method. +type TestIamPermissionsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A subset of `TestPermissionsRequest.permissions` that the caller is + // allowed. + Permissions []string `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"` +} + +func (x *TestIamPermissionsResponse) Reset() { + *x = TestIamPermissionsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TestIamPermissionsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TestIamPermissionsResponse) ProtoMessage() {} + +func (x *TestIamPermissionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TestIamPermissionsResponse.ProtoReflect.Descriptor instead. 
+func (*TestIamPermissionsResponse) Descriptor() ([]byte, []int) { + return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{3} +} + +func (x *TestIamPermissionsResponse) GetPermissions() []string { + if x != nil { + return x.Permissions + } + return nil +} + +var File_google_iam_v1_iam_policy_proto protoreflect.FileDescriptor + +var file_google_iam_v1_iam_policy_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, + 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a, + 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, + 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, + 0x31, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xad, 0x01, + 0x0a, 0x13, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, + 0x01, 0x2a, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x32, 0x0a, 0x06, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, + 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x77, 0x0a, + 0x13, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01, + 0x2a, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x39, 0x0a, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x69, 0x0a, 0x19, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, + 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01, 0x2a, + 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0b, 0x70, 0x65, + 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x22, 0x3e, 0x0a, 0x1a, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x20, 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x32, 0xb4, 0x03, 0x0a, 0x09, 0x49, 0x41, 0x4d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x74, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, + 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x74, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, + 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, + 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x9a, 0x01, 0x0a, 0x12, + 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, + 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, + 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x22, + 0x24, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 
0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, + 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0x1a, 0x1e, 0xca, 0x41, 0x1b, 0x69, 0x61, 0x6d, + 0x2d, 0x6d, 0x65, 0x74, 0x61, 0x2d, 0x61, 0x70, 0x69, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x86, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0e, + 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, + 0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, + 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_iam_v1_iam_policy_proto_rawDescOnce sync.Once + file_google_iam_v1_iam_policy_proto_rawDescData = file_google_iam_v1_iam_policy_proto_rawDesc +) + +func file_google_iam_v1_iam_policy_proto_rawDescGZIP() []byte { + file_google_iam_v1_iam_policy_proto_rawDescOnce.Do(func() { + file_google_iam_v1_iam_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_iam_policy_proto_rawDescData) + }) + return file_google_iam_v1_iam_policy_proto_rawDescData +} + +var file_google_iam_v1_iam_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_iam_v1_iam_policy_proto_goTypes = []interface{}{ + (*SetIamPolicyRequest)(nil), // 0: google.iam.v1.SetIamPolicyRequest + (*GetIamPolicyRequest)(nil), // 1: google.iam.v1.GetIamPolicyRequest + (*TestIamPermissionsRequest)(nil), // 2: google.iam.v1.TestIamPermissionsRequest + (*TestIamPermissionsResponse)(nil), // 3: google.iam.v1.TestIamPermissionsResponse + (*Policy)(nil), // 4: google.iam.v1.Policy + (*fieldmaskpb.FieldMask)(nil), // 5: google.protobuf.FieldMask + (*GetPolicyOptions)(nil), // 6: google.iam.v1.GetPolicyOptions +} +var file_google_iam_v1_iam_policy_proto_depIdxs = []int32{ + 4, // 0: google.iam.v1.SetIamPolicyRequest.policy:type_name -> google.iam.v1.Policy + 5, // 1: google.iam.v1.SetIamPolicyRequest.update_mask:type_name -> google.protobuf.FieldMask + 6, // 2: google.iam.v1.GetIamPolicyRequest.options:type_name -> google.iam.v1.GetPolicyOptions + 0, // 3: google.iam.v1.IAMPolicy.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest + 1, // 4: google.iam.v1.IAMPolicy.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest + 2, // 5: google.iam.v1.IAMPolicy.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest + 4, // 6: google.iam.v1.IAMPolicy.SetIamPolicy:output_type -> google.iam.v1.Policy + 4, // 7: google.iam.v1.IAMPolicy.GetIamPolicy:output_type -> google.iam.v1.Policy + 3, // 8: google.iam.v1.IAMPolicy.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse + 6, // [6:9] is the sub-list for method output_type + 3, // [3:6] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // 
[0:3] is the sub-list for field type_name +} + +func init() { file_google_iam_v1_iam_policy_proto_init() } +func file_google_iam_v1_iam_policy_proto_init() { + if File_google_iam_v1_iam_policy_proto != nil { + return + } + file_google_iam_v1_options_proto_init() + file_google_iam_v1_policy_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_iam_v1_iam_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetIamPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_iam_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetIamPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_iam_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TestIamPermissionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_iam_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TestIamPermissionsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_iam_v1_iam_policy_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_iam_v1_iam_policy_proto_goTypes, + DependencyIndexes: file_google_iam_v1_iam_policy_proto_depIdxs, + MessageInfos: file_google_iam_v1_iam_policy_proto_msgTypes, + }.Build() + File_google_iam_v1_iam_policy_proto = out.File + file_google_iam_v1_iam_policy_proto_rawDesc = nil + file_google_iam_v1_iam_policy_proto_goTypes = nil + file_google_iam_v1_iam_policy_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// IAMPolicyClient is the client API for IAMPolicy service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type IAMPolicyClient interface { + // Sets the access control policy on the specified resource. Replaces any + // existing policy. + // + // Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. + SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) + // Gets the access control policy for a resource. + // Returns an empty policy if the resource exists and does not have a policy + // set. + GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) + // Returns permissions that a caller has on the specified resource. + // If the resource does not exist, this will return an empty set of + // permissions, not a `NOT_FOUND` error. + // + // Note: This operation is designed to be used for building permission-aware + // UIs and command-line tools, not for authorization checking. 
This operation + // may "fail open" without warning. + TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error) +} + +type iAMPolicyClient struct { + cc grpc.ClientConnInterface +} + +func NewIAMPolicyClient(cc grpc.ClientConnInterface) IAMPolicyClient { + return &iAMPolicyClient{cc} +} + +func (c *iAMPolicyClient) SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) { + out := new(Policy) + err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/SetIamPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMPolicyClient) GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) { + out := new(Policy) + err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/GetIamPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMPolicyClient) TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error) { + out := new(TestIamPermissionsResponse) + err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/TestIamPermissions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// IAMPolicyServer is the server API for IAMPolicy service. +type IAMPolicyServer interface { + // Sets the access control policy on the specified resource. Replaces any + // existing policy. + // + // Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. + SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error) + // Gets the access control policy for a resource. + // Returns an empty policy if the resource exists and does not have a policy + // set. + GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error) + // Returns permissions that a caller has on the specified resource. + // If the resource does not exist, this will return an empty set of + // permissions, not a `NOT_FOUND` error. + // + // Note: This operation is designed to be used for building permission-aware + // UIs and command-line tools, not for authorization checking. This operation + // may "fail open" without warning. + TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error) +} + +// UnimplementedIAMPolicyServer can be embedded to have forward compatible implementations. 
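+//
+// A minimal sketch of such an embedding (illustrative only; the embedded
+// methods return an Unimplemented error, and only overridden methods change
+// behavior):
+//
+//	type policyServer struct {
+//		UnimplementedIAMPolicyServer
+//	}
+//
+//	func (s *policyServer) GetIamPolicy(ctx context.Context, req *GetIamPolicyRequest) (*Policy, error) {
+//		return &Policy{Etag: []byte("BwWWja0YfJA=")}, nil
+//	}
+//
+//	// s := grpc.NewServer()
+//	// RegisterIAMPolicyServer(s, &policyServer{})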
+type UnimplementedIAMPolicyServer struct { +} + +func (*UnimplementedIAMPolicyServer) SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented") +} +func (*UnimplementedIAMPolicyServer) GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented") +} +func (*UnimplementedIAMPolicyServer) TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented") +} + +func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) { + s.RegisterService(&_IAMPolicy_serviceDesc, srv) +} + +func _IAMPolicy_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMPolicyServer).SetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.v1.IAMPolicy/SetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMPolicyServer).SetIamPolicy(ctx, req.(*SetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAMPolicy_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMPolicyServer).GetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.v1.IAMPolicy/GetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMPolicyServer).GetIamPolicy(ctx, req.(*GetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAMPolicy_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TestIamPermissionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMPolicyServer).TestIamPermissions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.v1.IAMPolicy/TestIamPermissions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMPolicyServer).TestIamPermissions(ctx, req.(*TestIamPermissionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _IAMPolicy_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.iam.v1.IAMPolicy", + HandlerType: (*IAMPolicyServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SetIamPolicy", + Handler: _IAMPolicy_SetIamPolicy_Handler, + }, + { + MethodName: "GetIamPolicy", + Handler: _IAMPolicy_GetIamPolicy_Handler, + }, + { + MethodName: "TestIamPermissions", + Handler: _IAMPolicy_TestIamPermissions_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/iam/v1/iam_policy.proto", +} diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go new file mode 100644 index 00000000000..835f2171998 --- /dev/null +++ 
b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go @@ -0,0 +1,187 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.5 +// source: google/iam/v1/options.proto + +package iampb + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Encapsulates settings provided to GetIamPolicy. +type GetPolicyOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. The maximum policy version that will be used to format the + // policy. + // + // Valid values are 0, 1, and 3. Requests specifying an invalid value will be + // rejected. + // + // Requests for policies with any conditional role bindings must specify + // version 3. Policies with no conditional role bindings may specify any valid + // value or leave the field unset. + // + // The policy in the response might use the policy version that you specified, + // or it might use a lower policy version. For example, if you specify version + // 3, but the policy has no conditional role bindings, the response uses + // version 1. + // + // To learn which resources support conditions in their IAM policies, see the + // [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-policies). + RequestedPolicyVersion int32 `protobuf:"varint,1,opt,name=requested_policy_version,json=requestedPolicyVersion,proto3" json:"requested_policy_version,omitempty"` +} + +func (x *GetPolicyOptions) Reset() { + *x = GetPolicyOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_options_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetPolicyOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPolicyOptions) ProtoMessage() {} + +func (x *GetPolicyOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_options_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPolicyOptions.ProtoReflect.Descriptor instead. 
+func (*GetPolicyOptions) Descriptor() ([]byte, []int) { + return file_google_iam_v1_options_proto_rawDescGZIP(), []int{0} +} + +func (x *GetPolicyOptions) GetRequestedPolicyVersion() int32 { + if x != nil { + return x.RequestedPolicyVersion + } + return 0 +} + +var File_google_iam_v1_options_proto protoreflect.FileDescriptor + +var file_google_iam_v1_options_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x22, 0x4c, 0x0a, 0x10, + 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x38, 0x0a, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x84, 0x01, 0x0a, 0x11, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, + 0x42, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, + 0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, + 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_iam_v1_options_proto_rawDescOnce sync.Once + file_google_iam_v1_options_proto_rawDescData = file_google_iam_v1_options_proto_rawDesc +) + +func file_google_iam_v1_options_proto_rawDescGZIP() []byte { + file_google_iam_v1_options_proto_rawDescOnce.Do(func() { + file_google_iam_v1_options_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_options_proto_rawDescData) + }) + return file_google_iam_v1_options_proto_rawDescData +} + +var file_google_iam_v1_options_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_iam_v1_options_proto_goTypes = []interface{}{ + (*GetPolicyOptions)(nil), // 0: google.iam.v1.GetPolicyOptions +} +var file_google_iam_v1_options_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_iam_v1_options_proto_init() } +func file_google_iam_v1_options_proto_init() { + if File_google_iam_v1_options_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_iam_v1_options_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetPolicyOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_iam_v1_options_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_iam_v1_options_proto_goTypes, + DependencyIndexes: file_google_iam_v1_options_proto_depIdxs, + MessageInfos: file_google_iam_v1_options_proto_msgTypes, + }.Build() + File_google_iam_v1_options_proto = out.File + file_google_iam_v1_options_proto_rawDesc = nil + file_google_iam_v1_options_proto_goTypes = nil + file_google_iam_v1_options_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go new file mode 100644 index 00000000000..ec7777a7687 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go @@ -0,0 +1,1169 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.5 +// source: google/iam/v1/policy.proto + +package iampb + +import ( + reflect "reflect" + sync "sync" + + expr "google.golang.org/genproto/googleapis/type/expr" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The list of valid permission types for which logging can be configured. +// Admin writes are always logged, and are not configurable. +type AuditLogConfig_LogType int32 + +const ( + // Default case. Should never be this. + AuditLogConfig_LOG_TYPE_UNSPECIFIED AuditLogConfig_LogType = 0 + // Admin reads. Example: CloudIAM getIamPolicy + AuditLogConfig_ADMIN_READ AuditLogConfig_LogType = 1 + // Data writes. Example: CloudSQL Users create + AuditLogConfig_DATA_WRITE AuditLogConfig_LogType = 2 + // Data reads. Example: CloudSQL Users list + AuditLogConfig_DATA_READ AuditLogConfig_LogType = 3 +) + +// Enum value maps for AuditLogConfig_LogType. 
+var ( + AuditLogConfig_LogType_name = map[int32]string{ + 0: "LOG_TYPE_UNSPECIFIED", + 1: "ADMIN_READ", + 2: "DATA_WRITE", + 3: "DATA_READ", + } + AuditLogConfig_LogType_value = map[string]int32{ + "LOG_TYPE_UNSPECIFIED": 0, + "ADMIN_READ": 1, + "DATA_WRITE": 2, + "DATA_READ": 3, + } +) + +func (x AuditLogConfig_LogType) Enum() *AuditLogConfig_LogType { + p := new(AuditLogConfig_LogType) + *p = x + return p +} + +func (x AuditLogConfig_LogType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AuditLogConfig_LogType) Descriptor() protoreflect.EnumDescriptor { + return file_google_iam_v1_policy_proto_enumTypes[0].Descriptor() +} + +func (AuditLogConfig_LogType) Type() protoreflect.EnumType { + return &file_google_iam_v1_policy_proto_enumTypes[0] +} + +func (x AuditLogConfig_LogType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AuditLogConfig_LogType.Descriptor instead. +func (AuditLogConfig_LogType) EnumDescriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{3, 0} +} + +// The type of action performed on a Binding in a policy. +type BindingDelta_Action int32 + +const ( + // Unspecified. + BindingDelta_ACTION_UNSPECIFIED BindingDelta_Action = 0 + // Addition of a Binding. + BindingDelta_ADD BindingDelta_Action = 1 + // Removal of a Binding. + BindingDelta_REMOVE BindingDelta_Action = 2 +) + +// Enum value maps for BindingDelta_Action. +var ( + BindingDelta_Action_name = map[int32]string{ + 0: "ACTION_UNSPECIFIED", + 1: "ADD", + 2: "REMOVE", + } + BindingDelta_Action_value = map[string]int32{ + "ACTION_UNSPECIFIED": 0, + "ADD": 1, + "REMOVE": 2, + } +) + +func (x BindingDelta_Action) Enum() *BindingDelta_Action { + p := new(BindingDelta_Action) + *p = x + return p +} + +func (x BindingDelta_Action) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (BindingDelta_Action) Descriptor() protoreflect.EnumDescriptor { + return file_google_iam_v1_policy_proto_enumTypes[1].Descriptor() +} + +func (BindingDelta_Action) Type() protoreflect.EnumType { + return &file_google_iam_v1_policy_proto_enumTypes[1] +} + +func (x BindingDelta_Action) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use BindingDelta_Action.Descriptor instead. +func (BindingDelta_Action) EnumDescriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{5, 0} +} + +// The type of action performed on an audit configuration in a policy. +type AuditConfigDelta_Action int32 + +const ( + // Unspecified. + AuditConfigDelta_ACTION_UNSPECIFIED AuditConfigDelta_Action = 0 + // Addition of an audit configuration. + AuditConfigDelta_ADD AuditConfigDelta_Action = 1 + // Removal of an audit configuration. + AuditConfigDelta_REMOVE AuditConfigDelta_Action = 2 +) + +// Enum value maps for AuditConfigDelta_Action. 
+var ( + AuditConfigDelta_Action_name = map[int32]string{ + 0: "ACTION_UNSPECIFIED", + 1: "ADD", + 2: "REMOVE", + } + AuditConfigDelta_Action_value = map[string]int32{ + "ACTION_UNSPECIFIED": 0, + "ADD": 1, + "REMOVE": 2, + } +) + +func (x AuditConfigDelta_Action) Enum() *AuditConfigDelta_Action { + p := new(AuditConfigDelta_Action) + *p = x + return p +} + +func (x AuditConfigDelta_Action) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AuditConfigDelta_Action) Descriptor() protoreflect.EnumDescriptor { + return file_google_iam_v1_policy_proto_enumTypes[2].Descriptor() +} + +func (AuditConfigDelta_Action) Type() protoreflect.EnumType { + return &file_google_iam_v1_policy_proto_enumTypes[2] +} + +func (x AuditConfigDelta_Action) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AuditConfigDelta_Action.Descriptor instead. +func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{6, 0} +} + +// An Identity and Access Management (IAM) policy, which specifies access +// controls for Google Cloud resources. +// +// A `Policy` is a collection of `bindings`. A `binding` binds one or more +// `members`, or principals, to a single `role`. Principals can be user +// accounts, service accounts, Google groups, and domains (such as G Suite). A +// `role` is a named list of permissions; each `role` can be an IAM predefined +// role or a user-created custom role. +// +// For some types of Google Cloud resources, a `binding` can also specify a +// `condition`, which is a logical expression that allows access to a resource +// only if the expression evaluates to `true`. A condition can add constraints +// based on attributes of the request, the resource, or both. To learn which +// resources support conditions in their IAM policies, see the +// [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). +// +// **JSON example:** +// +// { +// "bindings": [ +// { +// "role": "roles/resourcemanager.organizationAdmin", +// "members": [ +// "user:mike@example.com", +// "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" +// ] +// }, +// { +// "role": "roles/resourcemanager.organizationViewer", +// "members": [ +// "user:eve@example.com" +// ], +// "condition": { +// "title": "expirable access", +// "description": "Does not grant access after Sep 2020", +// "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", +// } +// } +// ], +// "etag": "BwWWja0YfJA=", +// "version": 3 +// } +// +// **YAML example:** +// +// bindings: +// - members: +// - user:mike@example.com +// - group:admins@example.com +// - domain:google.com +// - serviceAccount:my-project-id@appspot.gserviceaccount.com +// role: roles/resourcemanager.organizationAdmin +// - members: +// - user:eve@example.com +// role: roles/resourcemanager.organizationViewer +// condition: +// title: expirable access +// description: Does not grant access after Sep 2020 +// expression: request.time < timestamp('2020-10-01T00:00:00.000Z') +// etag: BwWWja0YfJA= +// version: 3 +// +// For a description of IAM and its features, see the +// [IAM documentation](https://cloud.google.com/iam/docs/). +type Policy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the format of the policy. 
+ // + // Valid values are `0`, `1`, and `3`. Requests that specify an invalid value + // are rejected. + // + // Any operation that affects conditional role bindings must specify version + // `3`. This requirement applies to the following operations: + // + // - Getting a policy that includes a conditional role binding + // - Adding a conditional role binding to a policy + // - Changing a conditional role binding in a policy + // - Removing any role binding, with or without a condition, from a policy + // that includes conditions + // + // **Important:** If you use IAM Conditions, you must include the `etag` field + // whenever you call `setIamPolicy`. If you omit this field, then IAM allows + // you to overwrite a version `3` policy with a version `1` policy, and all of + // the conditions in the version `3` policy are lost. + // + // If a policy does not include any conditions, operations on that policy may + // specify any valid version or leave the field unset. + // + // To learn which resources support conditions in their IAM policies, see the + // [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). + Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + // Associates a list of `members`, or principals, with a `role`. Optionally, + // may specify a `condition` that determines how and when the `bindings` are + // applied. Each of the `bindings` must contain at least one principal. + // + // The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 + // of these principals can be Google groups. Each occurrence of a principal + // counts towards these limits. For example, if the `bindings` grant 50 + // different roles to `user:alice@example.com`, and not to any other + // principal, then you can add another 1,450 principals to the `bindings` in + // the `Policy`. + Bindings []*Binding `protobuf:"bytes,4,rep,name=bindings,proto3" json:"bindings,omitempty"` + // Specifies cloud audit logging configuration for this policy. + AuditConfigs []*AuditConfig `protobuf:"bytes,6,rep,name=audit_configs,json=auditConfigs,proto3" json:"audit_configs,omitempty"` + // `etag` is used for optimistic concurrency control as a way to help + // prevent simultaneous updates of a policy from overwriting each other. + // It is strongly suggested that systems make use of the `etag` in the + // read-modify-write cycle to perform policy updates in order to avoid race + // conditions: An `etag` is returned in the response to `getIamPolicy`, and + // systems are expected to put that etag in the request to `setIamPolicy` to + // ensure that their change will be applied to the same version of the policy. + // + // **Important:** If you use IAM Conditions, you must include the `etag` field + // whenever you call `setIamPolicy`. If you omit this field, then IAM allows + // you to overwrite a version `3` policy with a version `1` policy, and all of + // the conditions in the version `3` policy are lost. 
+ Etag []byte `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"` +} + +func (x *Policy) Reset() { + *x = Policy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Policy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Policy) ProtoMessage() {} + +func (x *Policy) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Policy.ProtoReflect.Descriptor instead. +func (*Policy) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{0} +} + +func (x *Policy) GetVersion() int32 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *Policy) GetBindings() []*Binding { + if x != nil { + return x.Bindings + } + return nil +} + +func (x *Policy) GetAuditConfigs() []*AuditConfig { + if x != nil { + return x.AuditConfigs + } + return nil +} + +func (x *Policy) GetEtag() []byte { + if x != nil { + return x.Etag + } + return nil +} + +// Associates `members`, or principals, with a `role`. +type Binding struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Role that is assigned to the list of `members`, or principals. + // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` + // Specifies the principals requesting access for a Cloud Platform resource. + // `members` can have the following values: + // + // - `allUsers`: A special identifier that represents anyone who is + // on the internet; with or without a Google account. + // + // - `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. + // + // - `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . + // + // - `serviceAccount:{emailid}`: An email address that represents a service + // account. For example, `my-other-app@appspot.gserviceaccount.com`. + // + // - `group:{emailid}`: An email address that represents a Google group. + // For example, `admins@example.com`. + // + // - `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a user that has been recently deleted. For + // example, `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered user + // retains the role in the binding. + // + // - `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus + // unique identifier) representing a service account that has been recently + // deleted. For example, + // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains the + // role in the binding. + // + // - `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a Google group that has been recently + // deleted. For example, `admins@example.com?uid=123456789012345678901`. 
If + // the group is recovered, this value reverts to `group:{emailid}` and the + // recovered group retains the role in the binding. + // + // - `domain:{domain}`: The G Suite domain (primary) that represents all the + // users of that domain. For example, `google.com` or `example.com`. + Members []string `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` + // The condition that is associated with this binding. + // + // If the condition evaluates to `true`, then this binding applies to the + // current request. + // + // If the condition evaluates to `false`, then this binding does not apply to + // the current request. However, a different role binding might grant the same + // role to one or more of the principals in this binding. + // + // To learn which resources support conditions in their IAM policies, see the + // [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-policies). + Condition *expr.Expr `protobuf:"bytes,3,opt,name=condition,proto3" json:"condition,omitempty"` +} + +func (x *Binding) Reset() { + *x = Binding{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Binding) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Binding) ProtoMessage() {} + +func (x *Binding) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Binding.ProtoReflect.Descriptor instead. +func (*Binding) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{1} +} + +func (x *Binding) GetRole() string { + if x != nil { + return x.Role + } + return "" +} + +func (x *Binding) GetMembers() []string { + if x != nil { + return x.Members + } + return nil +} + +func (x *Binding) GetCondition() *expr.Expr { + if x != nil { + return x.Condition + } + return nil +} + +// Specifies the audit configuration for a service. +// The configuration determines which permission types are logged, and what +// identities, if any, are exempted from logging. +// An AuditConfig must have one or more AuditLogConfigs. +// +// If there are AuditConfigs for both `allServices` and a specific service, +// the union of the two AuditConfigs is used for that service: the log_types +// specified in each AuditConfig are enabled, and the exempted_members in each +// AuditLogConfig are exempted. +// +// Example Policy with multiple AuditConfigs: +// +// { +// "audit_configs": [ +// { +// "service": "allServices", +// "audit_log_configs": [ +// { +// "log_type": "DATA_READ", +// "exempted_members": [ +// "user:jose@example.com" +// ] +// }, +// { +// "log_type": "DATA_WRITE" +// }, +// { +// "log_type": "ADMIN_READ" +// } +// ] +// }, +// { +// "service": "sampleservice.googleapis.com", +// "audit_log_configs": [ +// { +// "log_type": "DATA_READ" +// }, +// { +// "log_type": "DATA_WRITE", +// "exempted_members": [ +// "user:aliya@example.com" +// ] +// } +// ] +// } +// ] +// } +// +// For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ +// logging. It also exempts jose@example.com from DATA_READ logging, and +// aliya@example.com from DATA_WRITE logging. 
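+//
+// Expressed with the generated Go types below, the `sampleservice` entry
+// above is roughly (a minimal sketch; it uses only types from this file):
+//
+//	cfg := &AuditConfig{
+//		Service: "sampleservice.googleapis.com",
+//		AuditLogConfigs: []*AuditLogConfig{
+//			{LogType: AuditLogConfig_DATA_READ},
+//			{
+//				LogType:         AuditLogConfig_DATA_WRITE,
+//				ExemptedMembers: []string{"user:aliya@example.com"},
+//			},
+//		},
+//	}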
+type AuditConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies a service that will be enabled for audit logging. + // For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. + // `allServices` is a special value that covers all services. + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + // The configuration for logging of each type of permission. + AuditLogConfigs []*AuditLogConfig `protobuf:"bytes,3,rep,name=audit_log_configs,json=auditLogConfigs,proto3" json:"audit_log_configs,omitempty"` +} + +func (x *AuditConfig) Reset() { + *x = AuditConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuditConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuditConfig) ProtoMessage() {} + +func (x *AuditConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuditConfig.ProtoReflect.Descriptor instead. +func (*AuditConfig) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{2} +} + +func (x *AuditConfig) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *AuditConfig) GetAuditLogConfigs() []*AuditLogConfig { + if x != nil { + return x.AuditLogConfigs + } + return nil +} + +// Provides the configuration for logging a type of permissions. +// Example: +// +// { +// "audit_log_configs": [ +// { +// "log_type": "DATA_READ", +// "exempted_members": [ +// "user:jose@example.com" +// ] +// }, +// { +// "log_type": "DATA_WRITE" +// } +// ] +// } +// +// This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting +// jose@example.com from DATA_READ logging. +type AuditLogConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The log type that this config enables. + LogType AuditLogConfig_LogType `protobuf:"varint,1,opt,name=log_type,json=logType,proto3,enum=google.iam.v1.AuditLogConfig_LogType" json:"log_type,omitempty"` + // Specifies the identities that do not cause logging for this type of + // permission. + // Follows the same format of [Binding.members][google.iam.v1.Binding.members]. + ExemptedMembers []string `protobuf:"bytes,2,rep,name=exempted_members,json=exemptedMembers,proto3" json:"exempted_members,omitempty"` +} + +func (x *AuditLogConfig) Reset() { + *x = AuditLogConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuditLogConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuditLogConfig) ProtoMessage() {} + +func (x *AuditLogConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuditLogConfig.ProtoReflect.Descriptor instead. 
+func (*AuditLogConfig) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{3} +} + +func (x *AuditLogConfig) GetLogType() AuditLogConfig_LogType { + if x != nil { + return x.LogType + } + return AuditLogConfig_LOG_TYPE_UNSPECIFIED +} + +func (x *AuditLogConfig) GetExemptedMembers() []string { + if x != nil { + return x.ExemptedMembers + } + return nil +} + +// The difference delta between two policies. +type PolicyDelta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The delta for Bindings between two policies. + BindingDeltas []*BindingDelta `protobuf:"bytes,1,rep,name=binding_deltas,json=bindingDeltas,proto3" json:"binding_deltas,omitempty"` + // The delta for AuditConfigs between two policies. + AuditConfigDeltas []*AuditConfigDelta `protobuf:"bytes,2,rep,name=audit_config_deltas,json=auditConfigDeltas,proto3" json:"audit_config_deltas,omitempty"` +} + +func (x *PolicyDelta) Reset() { + *x = PolicyDelta{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PolicyDelta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PolicyDelta) ProtoMessage() {} + +func (x *PolicyDelta) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PolicyDelta.ProtoReflect.Descriptor instead. +func (*PolicyDelta) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{4} +} + +func (x *PolicyDelta) GetBindingDeltas() []*BindingDelta { + if x != nil { + return x.BindingDeltas + } + return nil +} + +func (x *PolicyDelta) GetAuditConfigDeltas() []*AuditConfigDelta { + if x != nil { + return x.AuditConfigDeltas + } + return nil +} + +// One delta entry for Binding. Each individual change (only one member in each +// entry) to a binding will be a separate entry. +type BindingDelta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The action that was performed on a Binding. + // Required + Action BindingDelta_Action `protobuf:"varint,1,opt,name=action,proto3,enum=google.iam.v1.BindingDelta_Action" json:"action,omitempty"` + // Role that is assigned to `members`. + // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + // Required + Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` + // A single identity requesting access for a Cloud Platform resource. + // Follows the same format of Binding.members. + // Required + Member string `protobuf:"bytes,3,opt,name=member,proto3" json:"member,omitempty"` + // The condition that is associated with this binding. 
+	Condition *expr.Expr `protobuf:"bytes,4,opt,name=condition,proto3" json:"condition,omitempty"`
+}
+
+func (x *BindingDelta) Reset() {
+	*x = BindingDelta{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_iam_v1_policy_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BindingDelta) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BindingDelta) ProtoMessage() {}
+
+func (x *BindingDelta) ProtoReflect() protoreflect.Message {
+	mi := &file_google_iam_v1_policy_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BindingDelta.ProtoReflect.Descriptor instead.
+func (*BindingDelta) Descriptor() ([]byte, []int) {
+	return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *BindingDelta) GetAction() BindingDelta_Action {
+	if x != nil {
+		return x.Action
+	}
+	return BindingDelta_ACTION_UNSPECIFIED
+}
+
+func (x *BindingDelta) GetRole() string {
+	if x != nil {
+		return x.Role
+	}
+	return ""
+}
+
+func (x *BindingDelta) GetMember() string {
+	if x != nil {
+		return x.Member
+	}
+	return ""
+}
+
+func (x *BindingDelta) GetCondition() *expr.Expr {
+	if x != nil {
+		return x.Condition
+	}
+	return nil
+}
+
+// One delta entry for AuditConfig. Each individual change (only one
+// exempted_member in each entry) to an AuditConfig will be a separate entry.
+type AuditConfigDelta struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The action that was performed on an audit configuration in a policy.
+	// Required
+	Action AuditConfigDelta_Action `protobuf:"varint,1,opt,name=action,proto3,enum=google.iam.v1.AuditConfigDelta_Action" json:"action,omitempty"`
+	// Specifies a service that was configured for Cloud Audit Logging.
+	// For example, `storage.googleapis.com`, `cloudsql.googleapis.com`.
+	// `allServices` is a special value that covers all services.
+	// Required
+	Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"`
+	// A single identity that is exempted from "data access" audit
+	// logging for the `service` specified above.
+	// Follows the same format of Binding.members.
+	ExemptedMember string `protobuf:"bytes,3,opt,name=exempted_member,json=exemptedMember,proto3" json:"exempted_member,omitempty"`
+	// Specifies the log_type that was enabled. ADMIN_ACTIVITY is always
+	// enabled, and cannot be configured.
+	// Required
+	LogType string `protobuf:"bytes,4,opt,name=log_type,json=logType,proto3" json:"log_type,omitempty"`
+}
+
+func (x *AuditConfigDelta) Reset() {
+	*x = AuditConfigDelta{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_iam_v1_policy_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *AuditConfigDelta) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AuditConfigDelta) ProtoMessage() {}
+
+func (x *AuditConfigDelta) ProtoReflect() protoreflect.Message {
+	mi := &file_google_iam_v1_policy_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use AuditConfigDelta.ProtoReflect.Descriptor instead.
+func (*AuditConfigDelta) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{6} +} + +func (x *AuditConfigDelta) GetAction() AuditConfigDelta_Action { + if x != nil { + return x.Action + } + return AuditConfigDelta_ACTION_UNSPECIFIED +} + +func (x *AuditConfigDelta) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *AuditConfigDelta) GetExemptedMember() string { + if x != nil { + return x.ExemptedMember + } + return "" +} + +func (x *AuditConfigDelta) GetLogType() string { + if x != nil { + return x.LogType + } + return "" +} + +var File_google_iam_v1_policy_proto protoreflect.FileDescriptor + +var file_google_iam_v1_policy_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a, 0x16, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x01, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x18, + 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x08, 0x62, 0x69, 0x6e, 0x64, + 0x69, 0x6e, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x69, + 0x6e, 0x67, 0x52, 0x08, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3f, 0x0a, 0x0d, + 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x06, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, + 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0c, 0x61, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x12, 0x0a, + 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x65, 0x74, 0x61, + 0x67, 0x22, 0x68, 0x0a, 0x07, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, + 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x6f, + 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x45, 0x78, 0x70, 0x72, + 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x72, 0x0a, 0x0b, 0x41, + 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x11, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x6c, 0x6f, + 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, + 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, + 0x61, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x73, 0x22, + 0xd1, 0x01, 0x0a, 0x0e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x40, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, + 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x6c, 0x6f, 0x67, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, + 0x5f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, + 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x22, + 0x52, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x14, 0x4c, 0x4f, + 0x47, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x44, 0x4d, 0x49, 0x4e, 0x5f, 0x52, 0x45, + 0x41, 0x44, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x57, 0x52, 0x49, + 0x54, 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x52, 0x45, 0x41, + 0x44, 0x10, 0x03, 0x22, 0xa2, 0x01, 0x0a, 0x0b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x44, 0x65, + 0x6c, 0x74, 0x61, 0x12, 0x42, 0x0a, 0x0e, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, + 0x65, 0x6c, 0x74, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x6e, 0x64, + 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x0d, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, + 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x12, 0x4f, 0x0a, 0x13, 0x61, 0x75, 0x64, 0x69, 0x74, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, + 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x11, 0x61, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x22, 0xde, 0x01, 0x0a, 0x0c, 0x42, 0x69, 0x6e, + 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x3a, 0x0a, 0x06, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, + 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x6d, + 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x62, 0x65, + 0x72, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x35, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, + 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 
0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, + 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x10, 0x02, 0x22, 0xe7, 0x01, 0x0a, 0x10, 0x41, 0x75, + 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x3e, + 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x41, + 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x2e, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, + 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x65, 0x78, 0x65, 0x6d, + 0x70, 0x74, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0e, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x6d, 0x62, 0x65, + 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x22, 0x35, 0x0a, 0x06, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, + 0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, + 0x45, 0x10, 0x02, 0x42, 0x83, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, + 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, 0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, + 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_google_iam_v1_policy_proto_rawDescOnce sync.Once + file_google_iam_v1_policy_proto_rawDescData = file_google_iam_v1_policy_proto_rawDesc +) + +func file_google_iam_v1_policy_proto_rawDescGZIP() []byte { + file_google_iam_v1_policy_proto_rawDescOnce.Do(func() { + file_google_iam_v1_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_policy_proto_rawDescData) + }) + return file_google_iam_v1_policy_proto_rawDescData +} + +var file_google_iam_v1_policy_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_google_iam_v1_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_google_iam_v1_policy_proto_goTypes = []interface{}{ + (AuditLogConfig_LogType)(0), // 0: google.iam.v1.AuditLogConfig.LogType + (BindingDelta_Action)(0), // 1: google.iam.v1.BindingDelta.Action + (AuditConfigDelta_Action)(0), // 2: google.iam.v1.AuditConfigDelta.Action + (*Policy)(nil), // 3: google.iam.v1.Policy + (*Binding)(nil), // 4: google.iam.v1.Binding + (*AuditConfig)(nil), // 5: google.iam.v1.AuditConfig + (*AuditLogConfig)(nil), // 6: 
google.iam.v1.AuditLogConfig + (*PolicyDelta)(nil), // 7: google.iam.v1.PolicyDelta + (*BindingDelta)(nil), // 8: google.iam.v1.BindingDelta + (*AuditConfigDelta)(nil), // 9: google.iam.v1.AuditConfigDelta + (*expr.Expr)(nil), // 10: google.type.Expr +} +var file_google_iam_v1_policy_proto_depIdxs = []int32{ + 4, // 0: google.iam.v1.Policy.bindings:type_name -> google.iam.v1.Binding + 5, // 1: google.iam.v1.Policy.audit_configs:type_name -> google.iam.v1.AuditConfig + 10, // 2: google.iam.v1.Binding.condition:type_name -> google.type.Expr + 6, // 3: google.iam.v1.AuditConfig.audit_log_configs:type_name -> google.iam.v1.AuditLogConfig + 0, // 4: google.iam.v1.AuditLogConfig.log_type:type_name -> google.iam.v1.AuditLogConfig.LogType + 8, // 5: google.iam.v1.PolicyDelta.binding_deltas:type_name -> google.iam.v1.BindingDelta + 9, // 6: google.iam.v1.PolicyDelta.audit_config_deltas:type_name -> google.iam.v1.AuditConfigDelta + 1, // 7: google.iam.v1.BindingDelta.action:type_name -> google.iam.v1.BindingDelta.Action + 10, // 8: google.iam.v1.BindingDelta.condition:type_name -> google.type.Expr + 2, // 9: google.iam.v1.AuditConfigDelta.action:type_name -> google.iam.v1.AuditConfigDelta.Action + 10, // [10:10] is the sub-list for method output_type + 10, // [10:10] is the sub-list for method input_type + 10, // [10:10] is the sub-list for extension type_name + 10, // [10:10] is the sub-list for extension extendee + 0, // [0:10] is the sub-list for field type_name +} + +func init() { file_google_iam_v1_policy_proto_init() } +func file_google_iam_v1_policy_proto_init() { + if File_google_iam_v1_policy_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_iam_v1_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Policy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Binding); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuditConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuditLogConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PolicyDelta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BindingDelta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuditConfigDelta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := 
protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_iam_v1_policy_proto_rawDesc, + NumEnums: 3, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_iam_v1_policy_proto_goTypes, + DependencyIndexes: file_google_iam_v1_policy_proto_depIdxs, + EnumInfos: file_google_iam_v1_policy_proto_enumTypes, + MessageInfos: file_google_iam_v1_policy_proto_msgTypes, + }.Build() + File_google_iam_v1_policy_proto = out.File + file_google_iam_v1_policy_proto_rawDesc = nil + file_google_iam_v1_policy_proto_goTypes = nil + file_google_iam_v1_policy_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/iam/iam.go b/vendor/cloud.google.com/go/iam/iam.go new file mode 100644 index 00000000000..0a06ea2e84d --- /dev/null +++ b/vendor/cloud.google.com/go/iam/iam.go @@ -0,0 +1,387 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package iam supports the resource-specific operations of Google Cloud +// IAM (Identity and Access Management) for the Google Cloud Libraries. +// See https://cloud.google.com/iam for more about IAM. +// +// Users of the Google Cloud Libraries will typically not use this package +// directly. Instead they will begin with some resource that supports IAM, like +// a pubsub topic, and call its IAM method to get a Handle for that resource. +package iam + +import ( + "context" + "fmt" + "time" + + gax "github.com/googleapis/gax-go/v2" + pb "google.golang.org/genproto/googleapis/iam/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// client abstracts the IAMPolicy API to allow multiple implementations. +type client interface { + Get(ctx context.Context, resource string) (*pb.Policy, error) + Set(ctx context.Context, resource string, p *pb.Policy) error + Test(ctx context.Context, resource string, perms []string) ([]string, error) + GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (*pb.Policy, error) +} + +// grpcClient implements client for the standard gRPC-based IAMPolicy service. 
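+// It wraps the generated pb.IAMPolicyClient and retries calls that fail with
+// DeadlineExceeded or Unavailable, using the exponential backoff configured
+// in withRetry below.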
+type grpcClient struct {
+ c pb.IAMPolicyClient
+}
+
+var withRetry = gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.DeadlineExceeded,
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60 * time.Second,
+ Multiplier: 1.3,
+ })
+})
+
+func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
+ return g.GetWithVersion(ctx, resource, 1)
+}
+
+func (g *grpcClient) GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (*pb.Policy, error) {
+ var proto *pb.Policy
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
+ ctx = insertMetadata(ctx, md)
+
+ err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
+ var err error
+ proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{
+ Resource: resource,
+ Options: &pb.GetPolicyOptions{
+ RequestedPolicyVersion: requestedPolicyVersion,
+ },
+ })
+ return err
+ }, withRetry)
+ if err != nil {
+ return nil, err
+ }
+ return proto, nil
+}
+
+func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
+ ctx = insertMetadata(ctx, md)
+
+ return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
+ _, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
+ Resource: resource,
+ Policy: p,
+ })
+ return err
+ }, withRetry)
+}
+
+func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
+ var res *pb.TestIamPermissionsResponse
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
+ ctx = insertMetadata(ctx, md)
+
+ err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
+ var err error
+ res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
+ Resource: resource,
+ Permissions: perms,
+ })
+ return err
+ }, withRetry)
+ if err != nil {
+ return nil, err
+ }
+ return res.Permissions, nil
+}
+
+// A Handle provides IAM operations for a resource.
+type Handle struct {
+ c client
+ resource string
+}
+
+// A Handle3 provides IAM operations for a resource. It is similar to a Handle, but provides access to newer IAM features (e.g., conditions).
+type Handle3 struct {
+ c client
+ resource string
+ version int32
+}
+
+// InternalNewHandle is for use by the Google Cloud Libraries only.
+//
+// InternalNewHandle returns a Handle for resource.
+// The conn parameter refers to a server that must support the IAMPolicy service.
+func InternalNewHandle(conn grpc.ClientConnInterface, resource string) *Handle {
+ return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource)
+}
+
+// InternalNewHandleGRPCClient is for use by the Google Cloud Libraries only.
+//
+// InternalNewHandleGRPCClient returns a Handle for resource using the given
+// gRPC service that implements IAM as a mixin.
+func InternalNewHandleGRPCClient(c pb.IAMPolicyClient, resource string) *Handle {
+ return InternalNewHandleClient(&grpcClient{c: c}, resource)
+}
+
+// InternalNewHandleClient is for use by the Google Cloud Libraries only.
+//
+// InternalNewHandleClient returns a Handle for resource using the given
+// client implementation.
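+//
+// Because it depends only on the package-private client interface declared at
+// the top of this file (Get, Set, Test, GetWithVersion), any implementation of
+// that interface can be supplied here.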
+func InternalNewHandleClient(c client, resource string) *Handle { + return &Handle{ + c: c, + resource: resource, + } +} + +// V3 returns a Handle3, which is like Handle except it sets +// requestedPolicyVersion to 3 when retrieving a policy and policy.version to 3 +// when storing a policy. +func (h *Handle) V3() *Handle3 { + return &Handle3{ + c: h.c, + resource: h.resource, + version: 3, + } +} + +// Policy retrieves the IAM policy for the resource. +func (h *Handle) Policy(ctx context.Context) (*Policy, error) { + proto, err := h.c.Get(ctx, h.resource) + if err != nil { + return nil, err + } + return &Policy{InternalProto: proto}, nil +} + +// SetPolicy replaces the resource's current policy with the supplied Policy. +// +// If policy was created from a prior call to Get, then the modification will +// only succeed if the policy has not changed since the Get. +func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error { + return h.c.Set(ctx, h.resource, policy.InternalProto) +} + +// TestPermissions returns the subset of permissions that the caller has on the resource. +func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) { + return h.c.Test(ctx, h.resource, permissions) +} + +// A RoleName is a name representing a collection of permissions. +type RoleName string + +// Common role names. +const ( + Owner RoleName = "roles/owner" + Editor RoleName = "roles/editor" + Viewer RoleName = "roles/viewer" +) + +const ( + // AllUsers is a special member that denotes all users, even unauthenticated ones. + AllUsers = "allUsers" + + // AllAuthenticatedUsers is a special member that denotes all authenticated users. + AllAuthenticatedUsers = "allAuthenticatedUsers" +) + +// A Policy is a list of Bindings representing roles +// granted to members. +// +// The zero Policy is a valid policy with no bindings. +type Policy struct { + // TODO(jba): when type aliases are available, put Policy into an internal package + // and provide an exported alias here. + + // This field is exported for use by the Google Cloud Libraries only. + // It may become unexported in a future release. + InternalProto *pb.Policy +} + +// Members returns the list of members with the supplied role. +// The return value should not be modified. Use Add and Remove +// to modify the members of a role. +func (p *Policy) Members(r RoleName) []string { + b := p.binding(r) + if b == nil { + return nil + } + return b.Members +} + +// HasRole reports whether member has role r. +func (p *Policy) HasRole(member string, r RoleName) bool { + return memberIndex(member, p.binding(r)) >= 0 +} + +// Add adds member member to role r if it is not already present. +// A new binding is created if there is no binding for the role. +func (p *Policy) Add(member string, r RoleName) { + b := p.binding(r) + if b == nil { + if p.InternalProto == nil { + p.InternalProto = &pb.Policy{} + } + p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{ + Role: string(r), + Members: []string{member}, + }) + return + } + if memberIndex(member, b) < 0 { + b.Members = append(b.Members, member) + return + } +} + +// Remove removes member from role r if it is present. 
+func (p *Policy) Remove(member string, r RoleName) {
+ bi := p.bindingIndex(r)
+ if bi < 0 {
+ return
+ }
+ bindings := p.InternalProto.Bindings
+ b := bindings[bi]
+ mi := memberIndex(member, b)
+ if mi < 0 {
+ return
+ }
+ // Order doesn't matter for bindings or members, so to remove, move the last item
+ // into the removed spot and shrink the slice.
+ if len(b.Members) == 1 {
+ // Remove binding.
+ last := len(bindings) - 1
+ bindings[bi] = bindings[last]
+ bindings[last] = nil
+ p.InternalProto.Bindings = bindings[:last]
+ return
+ }
+ // Remove member.
+ // TODO(jba): worry about multiple copies of m?
+ last := len(b.Members) - 1
+ b.Members[mi] = b.Members[last]
+ b.Members[last] = ""
+ b.Members = b.Members[:last]
+}
+
+// Roles returns the names of all the roles that appear in the Policy.
+func (p *Policy) Roles() []RoleName {
+ if p.InternalProto == nil {
+ return nil
+ }
+ var rns []RoleName
+ for _, b := range p.InternalProto.Bindings {
+ rns = append(rns, RoleName(b.Role))
+ }
+ return rns
+}
+
+// binding returns the Binding for the supplied role, or nil if there isn't one.
+func (p *Policy) binding(r RoleName) *pb.Binding {
+ i := p.bindingIndex(r)
+ if i < 0 {
+ return nil
+ }
+ return p.InternalProto.Bindings[i]
+}
+
+func (p *Policy) bindingIndex(r RoleName) int {
+ if p.InternalProto == nil {
+ return -1
+ }
+ for i, b := range p.InternalProto.Bindings {
+ if b.Role == string(r) {
+ return i
+ }
+ }
+ return -1
+}
+
+// memberIndex returns the index of m in b's Members, or -1 if not found.
+func memberIndex(m string, b *pb.Binding) int {
+ if b == nil {
+ return -1
+ }
+ for i, mm := range b.Members {
+ if mm == m {
+ return i
+ }
+ }
+ return -1
+}
+
+// insertMetadata inserts metadata into the given context.
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+ out, _ := metadata.FromOutgoingContext(ctx)
+ out = out.Copy()
+ for _, md := range mds {
+ for k, v := range md {
+ out[k] = append(out[k], v...)
+ }
+ }
+ return metadata.NewOutgoingContext(ctx, out)
+}
+
+// A Policy3 is a list of Bindings representing roles granted to members.
+//
+// The zero Policy3 is a valid policy with no bindings.
+//
+// It is similar to a Policy, except a Policy3 provides direct access to the
+// list of Bindings.
+//
+// The policy version is always set to 3.
+type Policy3 struct {
+ etag []byte
+ Bindings []*pb.Binding
+}
+
+// Policy retrieves the IAM policy for the resource.
+//
+// requestedPolicyVersion is always set to 3.
+func (h *Handle3) Policy(ctx context.Context) (*Policy3, error) {
+ proto, err := h.c.GetWithVersion(ctx, h.resource, h.version)
+ if err != nil {
+ return nil, err
+ }
+ return &Policy3{
+ Bindings: proto.Bindings,
+ etag: proto.Etag,
+ }, nil
+}
+
+// SetPolicy replaces the resource's current policy with the supplied Policy.
+//
+// If policy was created from a prior call to Get, then the modification will
+// only succeed if the policy has not changed since the Get.
+func (h *Handle3) SetPolicy(ctx context.Context, policy *Policy3) error {
+ return h.c.Set(ctx, h.resource, &pb.Policy{
+ Bindings: policy.Bindings,
+ Etag: policy.etag,
+ Version: h.version,
+ })
+}
+
+// TestPermissions returns the subset of permissions that the caller has on the resource.
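+//
+// A short illustrative call, where h is a *Handle3 obtained via Handle.V3();
+// the permission name is a placeholder and should be replaced with
+// permissions appropriate to the resource:
+//
+// granted, err := h.TestPermissions(ctx, []string{"cloudkms.cryptoKeys.get"})
+// if err != nil {
+// // TODO: Handle error.
+// }
+// _ = granted // the subset of requested permissions the caller holds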
+func (h *Handle3) TestPermissions(ctx context.Context, permissions []string) ([]string, error) { + return h.c.Test(ctx, h.resource, permissions) +} diff --git a/vendor/cloud.google.com/go/kms/LICENSE b/vendor/cloud.google.com/go/kms/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/cloud.google.com/go/kms/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/kms/apiv1/doc.go b/vendor/cloud.google.com/go/kms/apiv1/doc.go new file mode 100644 index 00000000000..53d50e6a50b --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/doc.go @@ -0,0 +1,178 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +// Package kms is an auto-generated package for the +// Cloud Key Management Service (KMS) API. +// +// Manages keys and performs cryptographic operations in a central cloud +// service, for direct use by other cloud resources and applications. +// +// # Example usage +// +// To get started with this package, create a client. +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := kms.NewEkmClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// The client will use your default application credentials. Clients should be reused instead of created as needed. +// The methods of Client are safe for concurrent use by multiple goroutines. +// The returned client must be Closed when it is done being used. +// +// # Using the Client +// +// The following is an example of making an API call with the newly created client. +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := kms.NewEkmClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// req := &kmspb.ListEkmConnectionsRequest{ +// // TODO: Fill request struct fields. 
+// // See https://pkg.go.dev/cloud.google.com/go/kms/apiv1/kmspb#ListEkmConnectionsRequest. +// } +// it := c.ListEkmConnections(ctx, req) +// for { +// resp, err := it.Next() +// if err == iterator.Done { +// break +// } +// if err != nil { +// // TODO: Handle error. +// } +// // TODO: Use resp. +// _ = resp +// } +// +// # Use of Context +// +// The ctx passed to NewEkmClient is used for authentication requests and +// for creating the underlying connection, but is not used for subsequent calls. +// Individual methods on the client use the ctx given to them. +// +// To close the open connection, use the Close() method. +// +// For information about setting deadlines, reusing contexts, and more +// please visit https://pkg.go.dev/cloud.google.com/go. +package kms // import "cloud.google.com/go/kms/apiv1" + +import ( + "context" + "os" + "runtime" + "strconv" + "strings" + "unicode" + + "google.golang.org/api/option" + "google.golang.org/grpc/metadata" +) + +// For more information on implementing a client constructor hook, see +// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors. +type clientHookParams struct{} +type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) + +var versionClient string + +func getVersionClient() string { + if versionClient == "" { + return "UNKNOWN" + } + return versionClient +} + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +func checkDisableDeadlines() (bool, error) { + raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE") + if !ok { + return false, nil + } + + b, err := strconv.ParseBool(raw) + return b, err +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudkms", + } +} + +// versionGo returns the Go runtime version. The returned string +// has no whitespace, suitable for reporting in header. +func versionGo() string { + const develPrefix = "devel +" + + s := runtime.Version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "UNKNOWN" +} diff --git a/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go b/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go new file mode 100644 index 00000000000..e4f37f767fb --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go @@ -0,0 +1,647 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + kmspb "cloud.google.com/go/kms/apiv1/kmspb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + locationpb "google.golang.org/genproto/googleapis/cloud/location" + iampb "google.golang.org/genproto/googleapis/iam/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" +) + +var newEkmClientHook clientHook + +// EkmCallOptions contains the retry settings for each method of EkmClient. +type EkmCallOptions struct { + ListEkmConnections []gax.CallOption + GetEkmConnection []gax.CallOption + CreateEkmConnection []gax.CallOption + UpdateEkmConnection []gax.CallOption + GetLocation []gax.CallOption + ListLocations []gax.CallOption + GetIamPolicy []gax.CallOption + SetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption +} + +func defaultEkmGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("cloudkms.googleapis.com:443"), + internaloption.WithDefaultMTLSEndpoint("cloudkms.mtls.googleapis.com:443"), + internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultEkmCallOptions() *EkmCallOptions { + return &EkmCallOptions{ + ListEkmConnections: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetEkmConnection: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateEkmConnection: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateEkmConnection: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetLocation: []gax.CallOption{}, + ListLocations: []gax.CallOption{}, + GetIamPolicy: []gax.CallOption{}, + SetIamPolicy: []gax.CallOption{}, + TestIamPermissions: []gax.CallOption{}, + } +} + +// internalEkmClient is an interface that defines the methods available from Cloud Key 
Management Service (KMS) API. +type internalEkmClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListEkmConnections(context.Context, *kmspb.ListEkmConnectionsRequest, ...gax.CallOption) *EkmConnectionIterator + GetEkmConnection(context.Context, *kmspb.GetEkmConnectionRequest, ...gax.CallOption) (*kmspb.EkmConnection, error) + CreateEkmConnection(context.Context, *kmspb.CreateEkmConnectionRequest, ...gax.CallOption) (*kmspb.EkmConnection, error) + UpdateEkmConnection(context.Context, *kmspb.UpdateEkmConnectionRequest, ...gax.CallOption) (*kmspb.EkmConnection, error) + GetLocation(context.Context, *locationpb.GetLocationRequest, ...gax.CallOption) (*locationpb.Location, error) + ListLocations(context.Context, *locationpb.ListLocationsRequest, ...gax.CallOption) *LocationIterator + GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) +} + +// EkmClient is a client for interacting with Cloud Key Management Service (KMS) API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// # Google Cloud Key Management EKM Service +// +// Manages external cryptographic keys and operations using those keys. +// Implements a REST model with the following objects: +// +// EkmConnection +type EkmClient struct { + // The internal transport-dependent client. + internalClient internalEkmClient + + // The call options for this service. + CallOptions *EkmCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *EkmClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *EkmClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *EkmClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListEkmConnections lists EkmConnections. +func (c *EkmClient) ListEkmConnections(ctx context.Context, req *kmspb.ListEkmConnectionsRequest, opts ...gax.CallOption) *EkmConnectionIterator { + return c.internalClient.ListEkmConnections(ctx, req, opts...) +} + +// GetEkmConnection returns metadata for a given +// EkmConnection. +func (c *EkmClient) GetEkmConnection(ctx context.Context, req *kmspb.GetEkmConnectionRequest, opts ...gax.CallOption) (*kmspb.EkmConnection, error) { + return c.internalClient.GetEkmConnection(ctx, req, opts...) +} + +// CreateEkmConnection creates a new EkmConnection in a given +// Project and Location. +func (c *EkmClient) CreateEkmConnection(ctx context.Context, req *kmspb.CreateEkmConnectionRequest, opts ...gax.CallOption) (*kmspb.EkmConnection, error) { + return c.internalClient.CreateEkmConnection(ctx, req, opts...) +} + +// UpdateEkmConnection updates an EkmConnection's metadata. 
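+//
+// A minimal illustrative call, where updated stands for a previously fetched
+// and modified kmspb.EkmConnection (see kmspb.UpdateEkmConnectionRequest for
+// the full request shape):
+//
+// conn, err := c.UpdateEkmConnection(ctx, &kmspb.UpdateEkmConnectionRequest{
+// EkmConnection: updated,
+// })
+// if err != nil {
+// // TODO: Handle error.
+// }
+// _ = conn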
+func (c *EkmClient) UpdateEkmConnection(ctx context.Context, req *kmspb.UpdateEkmConnectionRequest, opts ...gax.CallOption) (*kmspb.EkmConnection, error) { + return c.internalClient.UpdateEkmConnection(ctx, req, opts...) +} + +// GetLocation gets information about a location. +func (c *EkmClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) { + return c.internalClient.GetLocation(ctx, req, opts...) +} + +// ListLocations lists information about the supported locations for this service. +func (c *EkmClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator { + return c.internalClient.ListLocations(ctx, req, opts...) +} + +// GetIamPolicy gets the access control policy for a resource. Returns an empty policy +// if the resource exists and does not have a policy set. +func (c *EkmClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.GetIamPolicy(ctx, req, opts...) +} + +// SetIamPolicy sets the access control policy on the specified resource. Replaces +// any existing policy. +// +// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED +// errors. +func (c *EkmClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.SetIamPolicy(ctx, req, opts...) +} + +// TestIamPermissions returns permissions that a caller has on the specified resource. If the +// resource does not exist, this will return an empty set of +// permissions, not a NOT_FOUND error. +// +// Note: This operation is designed to be used for building +// permission-aware UIs and command-line tools, not for authorization +// checking. This operation may “fail open” without warning. +func (c *EkmClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + return c.internalClient.TestIamPermissions(ctx, req, opts...) +} + +// ekmGRPCClient is a client for interacting with Cloud Key Management Service (KMS) API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type ekmGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE + disableDeadlines bool + + // Points back to the CallOptions field of the containing EkmClient + CallOptions **EkmCallOptions + + // The gRPC API client. + ekmClient kmspb.EkmServiceClient + + iamPolicyClient iampb.IAMPolicyClient + + locationsClient locationpb.LocationsClient + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewEkmClient creates a new ekm service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// # Google Cloud Key Management EKM Service +// +// Manages external cryptographic keys and operations using those keys. 
+// Implements a REST model with the following objects: +// +// EkmConnection +func NewEkmClient(ctx context.Context, opts ...option.ClientOption) (*EkmClient, error) { + clientOpts := defaultEkmGRPCClientOptions() + if newEkmClientHook != nil { + hookOpts, err := newEkmClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + disableDeadlines, err := checkDisableDeadlines() + if err != nil { + return nil, err + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := EkmClient{CallOptions: defaultEkmCallOptions()} + + c := &ekmGRPCClient{ + connPool: connPool, + disableDeadlines: disableDeadlines, + ekmClient: kmspb.NewEkmServiceClient(connPool), + CallOptions: &client.CallOptions, + iamPolicyClient: iampb.NewIAMPolicyClient(connPool), + locationsClient: locationpb.NewLocationsClient(connPool), + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *ekmGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ekmGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ekmGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *ekmGRPCClient) ListEkmConnections(ctx context.Context, req *kmspb.ListEkmConnectionsRequest, opts ...gax.CallOption) *EkmConnectionIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ListEkmConnections[0:len((*c.CallOptions).ListEkmConnections):len((*c.CallOptions).ListEkmConnections)], opts...) + it := &EkmConnectionIterator{} + req = proto.Clone(req).(*kmspb.ListEkmConnectionsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.EkmConnection, string, error) { + resp := &kmspb.ListEkmConnectionsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.ekmClient.ListEkmConnections(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetEkmConnections(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
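+ // Next hands the buffered items out one at a time; only the token for
+ // the following page goes back to the pager.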
+ return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *ekmGRPCClient) GetEkmConnection(ctx context.Context, req *kmspb.GetEkmConnectionRequest, opts ...gax.CallOption) (*kmspb.EkmConnection, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetEkmConnection[0:len((*c.CallOptions).GetEkmConnection):len((*c.CallOptions).GetEkmConnection)], opts...) + var resp *kmspb.EkmConnection + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.ekmClient.GetEkmConnection(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *ekmGRPCClient) CreateEkmConnection(ctx context.Context, req *kmspb.CreateEkmConnectionRequest, opts ...gax.CallOption) (*kmspb.EkmConnection, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).CreateEkmConnection[0:len((*c.CallOptions).CreateEkmConnection):len((*c.CallOptions).CreateEkmConnection)], opts...) + var resp *kmspb.EkmConnection + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.ekmClient.CreateEkmConnection(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *ekmGRPCClient) UpdateEkmConnection(ctx context.Context, req *kmspb.UpdateEkmConnectionRequest, opts ...gax.CallOption) (*kmspb.EkmConnection, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "ekm_connection.name", url.QueryEscape(req.GetEkmConnection().GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).UpdateEkmConnection[0:len((*c.CallOptions).UpdateEkmConnection):len((*c.CallOptions).UpdateEkmConnection)], opts...) + var resp *kmspb.EkmConnection + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.ekmClient.UpdateEkmConnection(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *ekmGRPCClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetLocation[0:len((*c.CallOptions).GetLocation):len((*c.CallOptions).GetLocation)], opts...) 
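+ // The full slice expression above caps the capacity of the default
+ // options, so appending caller-supplied options allocates a fresh slice
+ // rather than mutating the shared per-method defaults.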
+ var resp *locationpb.Location + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.locationsClient.GetLocation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *ekmGRPCClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ListLocations[0:len((*c.CallOptions).ListLocations):len((*c.CallOptions).ListLocations)], opts...) + it := &LocationIterator{} + req = proto.Clone(req).(*locationpb.ListLocationsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*locationpb.Location, string, error) { + resp := &locationpb.ListLocationsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.locationsClient.ListLocations(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetLocations(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *ekmGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *ekmGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *ekmGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// EkmConnectionIterator manages a stream of *kmspb.EkmConnection. +type EkmConnectionIterator struct { + items []*kmspb.EkmConnection + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*kmspb.EkmConnection, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *EkmConnectionIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *EkmConnectionIterator) Next() (*kmspb.EkmConnection, error) { + var item *kmspb.EkmConnection + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *EkmConnectionIterator) bufLen() int { + return len(it.items) +} + +func (it *EkmConnectionIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// LocationIterator manages a stream of *locationpb.Location. +type LocationIterator struct { + items []*locationpb.Location + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*locationpb.Location, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *LocationIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. 
Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *LocationIterator) Next() (*locationpb.Location, error) { + var item *locationpb.Location + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *LocationIterator) bufLen() int { + return len(it.items) +} + +func (it *LocationIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/kms/apiv1/gapic_metadata.json b/vendor/cloud.google.com/go/kms/apiv1/gapic_metadata.json new file mode 100644 index 00000000000..5b9024e715f --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/gapic_metadata.json @@ -0,0 +1,227 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.", + "language": "go", + "protoPackage": "google.cloud.kms.v1", + "libraryPackage": "cloud.google.com/go/kms/apiv1", + "services": { + "EkmService": { + "clients": { + "grpc": { + "libraryClient": "EkmClient", + "rpcs": { + "CreateEkmConnection": { + "methods": [ + "CreateEkmConnection" + ] + }, + "GetEkmConnection": { + "methods": [ + "GetEkmConnection" + ] + }, + "GetIamPolicy": { + "methods": [ + "GetIamPolicy" + ] + }, + "GetLocation": { + "methods": [ + "GetLocation" + ] + }, + "ListEkmConnections": { + "methods": [ + "ListEkmConnections" + ] + }, + "ListLocations": { + "methods": [ + "ListLocations" + ] + }, + "SetIamPolicy": { + "methods": [ + "SetIamPolicy" + ] + }, + "TestIamPermissions": { + "methods": [ + "TestIamPermissions" + ] + }, + "UpdateEkmConnection": { + "methods": [ + "UpdateEkmConnection" + ] + } + } + } + } + }, + "KeyManagementService": { + "clients": { + "grpc": { + "libraryClient": "KeyManagementClient", + "rpcs": { + "AsymmetricDecrypt": { + "methods": [ + "AsymmetricDecrypt" + ] + }, + "AsymmetricSign": { + "methods": [ + "AsymmetricSign" + ] + }, + "CreateCryptoKey": { + "methods": [ + "CreateCryptoKey" + ] + }, + "CreateCryptoKeyVersion": { + "methods": [ + "CreateCryptoKeyVersion" + ] + }, + "CreateImportJob": { + "methods": [ + "CreateImportJob" + ] + }, + "CreateKeyRing": { + "methods": [ + "CreateKeyRing" + ] + }, + "Decrypt": { + "methods": [ + "Decrypt" + ] + }, + "DestroyCryptoKeyVersion": { + "methods": [ + "DestroyCryptoKeyVersion" + ] + }, + "Encrypt": { + "methods": [ + "Encrypt" + ] + }, + "GenerateRandomBytes": { + "methods": [ + "GenerateRandomBytes" + ] + }, + "GetCryptoKey": { + "methods": [ + "GetCryptoKey" + ] + }, + "GetCryptoKeyVersion": { + "methods": [ + "GetCryptoKeyVersion" + ] + }, + "GetIamPolicy": { + "methods": [ + "GetIamPolicy" + ] + }, + "GetImportJob": { + "methods": [ + "GetImportJob" + ] + }, + "GetKeyRing": { + "methods": [ + "GetKeyRing" + ] + }, + "GetLocation": { + "methods": [ + "GetLocation" + ] + }, + "GetPublicKey": { + "methods": [ + "GetPublicKey" + ] + }, + "ImportCryptoKeyVersion": { + "methods": [ + "ImportCryptoKeyVersion" + ] + }, + "ListCryptoKeyVersions": { + "methods": [ + "ListCryptoKeyVersions" + ] + }, + "ListCryptoKeys": { + "methods": [ + "ListCryptoKeys" + ] + }, + "ListImportJobs": { + "methods": [ + "ListImportJobs" + ] + }, + "ListKeyRings": { + "methods": [ + "ListKeyRings" + ] + }, + "ListLocations": { + "methods": [ + "ListLocations" + ] + }, + "MacSign": { + "methods": [ + "MacSign" + ] + }, + "MacVerify": { + "methods": [ + "MacVerify" + ] + }, + "RestoreCryptoKeyVersion": { + 
"methods": [ + "RestoreCryptoKeyVersion" + ] + }, + "SetIamPolicy": { + "methods": [ + "SetIamPolicy" + ] + }, + "TestIamPermissions": { + "methods": [ + "TestIamPermissions" + ] + }, + "UpdateCryptoKey": { + "methods": [ + "UpdateCryptoKey" + ] + }, + "UpdateCryptoKeyPrimaryVersion": { + "methods": [ + "UpdateCryptoKeyPrimaryVersion" + ] + }, + "UpdateCryptoKeyVersion": { + "methods": [ + "UpdateCryptoKeyVersion" + ] + } + } + } + } + } + } +} diff --git a/vendor/cloud.google.com/go/kms/apiv1/iam.go b/vendor/cloud.google.com/go/kms/apiv1/iam.go new file mode 100644 index 00000000000..9fb2a6ab3b9 --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/iam.go @@ -0,0 +1,40 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kms + +import ( + "cloud.google.com/go/iam" + kmspb "google.golang.org/genproto/googleapis/cloud/kms/v1" +) + +// KeyRingIAM returns a handle to inspect and change permissions of a KeyRing. +// +// Deprecated: Please use ResourceIAM and provide the KeyRing.Name as input. +func (c *KeyManagementClient) KeyRingIAM(keyRing *kmspb.KeyRing) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), keyRing.Name) +} + +// CryptoKeyIAM returns a handle to inspect and change permissions of a CryptoKey. +// +// Deprecated: Please use ResourceIAM and provide the CryptoKey.Name as input. +func (c *KeyManagementClient) CryptoKeyIAM(cryptoKey *kmspb.CryptoKey) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), cryptoKey.Name) +} + +// ResourceIAM returns a handle to inspect and change permissions of the resource +// indicated by the given resource path. +func (c *KeyManagementClient) ResourceIAM(resourcePath string) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), resourcePath) +} diff --git a/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go b/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go new file mode 100644 index 00000000000..3581887971a --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go @@ -0,0 +1,1800 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. 
+ +package kms + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + kmspb "cloud.google.com/go/kms/apiv1/kmspb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + locationpb "google.golang.org/genproto/googleapis/cloud/location" + iampb "google.golang.org/genproto/googleapis/iam/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" +) + +var newKeyManagementClientHook clientHook + +// KeyManagementCallOptions contains the retry settings for each method of KeyManagementClient. +type KeyManagementCallOptions struct { + ListKeyRings []gax.CallOption + ListCryptoKeys []gax.CallOption + ListCryptoKeyVersions []gax.CallOption + ListImportJobs []gax.CallOption + GetKeyRing []gax.CallOption + GetCryptoKey []gax.CallOption + GetCryptoKeyVersion []gax.CallOption + GetPublicKey []gax.CallOption + GetImportJob []gax.CallOption + CreateKeyRing []gax.CallOption + CreateCryptoKey []gax.CallOption + CreateCryptoKeyVersion []gax.CallOption + ImportCryptoKeyVersion []gax.CallOption + CreateImportJob []gax.CallOption + UpdateCryptoKey []gax.CallOption + UpdateCryptoKeyVersion []gax.CallOption + UpdateCryptoKeyPrimaryVersion []gax.CallOption + DestroyCryptoKeyVersion []gax.CallOption + RestoreCryptoKeyVersion []gax.CallOption + Encrypt []gax.CallOption + Decrypt []gax.CallOption + AsymmetricSign []gax.CallOption + AsymmetricDecrypt []gax.CallOption + MacSign []gax.CallOption + MacVerify []gax.CallOption + GenerateRandomBytes []gax.CallOption + GetLocation []gax.CallOption + ListLocations []gax.CallOption + GetIamPolicy []gax.CallOption + SetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption +} + +func defaultKeyManagementGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("cloudkms.googleapis.com:443"), + internaloption.WithDefaultMTLSEndpoint("cloudkms.mtls.googleapis.com:443"), + internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultKeyManagementCallOptions() *KeyManagementCallOptions { + return &KeyManagementCallOptions{ + ListKeyRings: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListCryptoKeys: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListCryptoKeyVersions: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListImportJobs: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * 
time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetKeyRing: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetCryptoKey: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetCryptoKeyVersion: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetPublicKey: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetImportJob: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateKeyRing: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateCryptoKey: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateCryptoKeyVersion: []gax.CallOption{}, + ImportCryptoKeyVersion: []gax.CallOption{}, + CreateImportJob: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateCryptoKey: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateCryptoKeyVersion: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateCryptoKeyPrimaryVersion: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + DestroyCryptoKeyVersion: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + 
RestoreCryptoKeyVersion: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + Encrypt: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + Decrypt: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + AsymmetricSign: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + AsymmetricDecrypt: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + MacSign: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + MacVerify: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GenerateRandomBytes: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetLocation: []gax.CallOption{}, + ListLocations: []gax.CallOption{}, + GetIamPolicy: []gax.CallOption{}, + SetIamPolicy: []gax.CallOption{}, + TestIamPermissions: []gax.CallOption{}, + } +} + +// internalKeyManagementClient is an interface that defines the methods available from Cloud Key Management Service (KMS) API. 
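+//
+// Most defaults above retry on Unavailable and DeadlineExceeded with
+// exponential backoff. Callers of the exported wrapper methods can override
+// them per call with gax options; a minimal sketch, assuming a constructed
+// client c and a request req (illustrative only):
+//
+//	ring, err := c.GetKeyRing(ctx, req, gax.WithRetry(func() gax.Retryer {
+//		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
+//			Initial:    50 * time.Millisecond,
+//			Max:        10 * time.Second,
+//			Multiplier: 1.5,
+//		})
+//	}))
+//	_, _ = ring, err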
+type internalKeyManagementClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListKeyRings(context.Context, *kmspb.ListKeyRingsRequest, ...gax.CallOption) *KeyRingIterator + ListCryptoKeys(context.Context, *kmspb.ListCryptoKeysRequest, ...gax.CallOption) *CryptoKeyIterator + ListCryptoKeyVersions(context.Context, *kmspb.ListCryptoKeyVersionsRequest, ...gax.CallOption) *CryptoKeyVersionIterator + ListImportJobs(context.Context, *kmspb.ListImportJobsRequest, ...gax.CallOption) *ImportJobIterator + GetKeyRing(context.Context, *kmspb.GetKeyRingRequest, ...gax.CallOption) (*kmspb.KeyRing, error) + GetCryptoKey(context.Context, *kmspb.GetCryptoKeyRequest, ...gax.CallOption) (*kmspb.CryptoKey, error) + GetCryptoKeyVersion(context.Context, *kmspb.GetCryptoKeyVersionRequest, ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) + GetPublicKey(context.Context, *kmspb.GetPublicKeyRequest, ...gax.CallOption) (*kmspb.PublicKey, error) + GetImportJob(context.Context, *kmspb.GetImportJobRequest, ...gax.CallOption) (*kmspb.ImportJob, error) + CreateKeyRing(context.Context, *kmspb.CreateKeyRingRequest, ...gax.CallOption) (*kmspb.KeyRing, error) + CreateCryptoKey(context.Context, *kmspb.CreateCryptoKeyRequest, ...gax.CallOption) (*kmspb.CryptoKey, error) + CreateCryptoKeyVersion(context.Context, *kmspb.CreateCryptoKeyVersionRequest, ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) + ImportCryptoKeyVersion(context.Context, *kmspb.ImportCryptoKeyVersionRequest, ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) + CreateImportJob(context.Context, *kmspb.CreateImportJobRequest, ...gax.CallOption) (*kmspb.ImportJob, error) + UpdateCryptoKey(context.Context, *kmspb.UpdateCryptoKeyRequest, ...gax.CallOption) (*kmspb.CryptoKey, error) + UpdateCryptoKeyVersion(context.Context, *kmspb.UpdateCryptoKeyVersionRequest, ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) + UpdateCryptoKeyPrimaryVersion(context.Context, *kmspb.UpdateCryptoKeyPrimaryVersionRequest, ...gax.CallOption) (*kmspb.CryptoKey, error) + DestroyCryptoKeyVersion(context.Context, *kmspb.DestroyCryptoKeyVersionRequest, ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) + RestoreCryptoKeyVersion(context.Context, *kmspb.RestoreCryptoKeyVersionRequest, ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) + Encrypt(context.Context, *kmspb.EncryptRequest, ...gax.CallOption) (*kmspb.EncryptResponse, error) + Decrypt(context.Context, *kmspb.DecryptRequest, ...gax.CallOption) (*kmspb.DecryptResponse, error) + AsymmetricSign(context.Context, *kmspb.AsymmetricSignRequest, ...gax.CallOption) (*kmspb.AsymmetricSignResponse, error) + AsymmetricDecrypt(context.Context, *kmspb.AsymmetricDecryptRequest, ...gax.CallOption) (*kmspb.AsymmetricDecryptResponse, error) + MacSign(context.Context, *kmspb.MacSignRequest, ...gax.CallOption) (*kmspb.MacSignResponse, error) + MacVerify(context.Context, *kmspb.MacVerifyRequest, ...gax.CallOption) (*kmspb.MacVerifyResponse, error) + GenerateRandomBytes(context.Context, *kmspb.GenerateRandomBytesRequest, ...gax.CallOption) (*kmspb.GenerateRandomBytesResponse, error) + GetLocation(context.Context, *locationpb.GetLocationRequest, ...gax.CallOption) (*locationpb.Location, error) + ListLocations(context.Context, *locationpb.ListLocationsRequest, ...gax.CallOption) *LocationIterator + GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + 
TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) +} + +// KeyManagementClient is a client for interacting with Cloud Key Management Service (KMS) API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// # Google Cloud Key Management Service +// +// Manages cryptographic keys and operations using those keys. Implements a REST +// model with the following objects: +// +// KeyRing +// +// CryptoKey +// +// CryptoKeyVersion +// +// ImportJob +// +// If you are using manual gRPC libraries, see +// Using gRPC with Cloud KMS (at https://cloud.google.com/kms/docs/grpc). +type KeyManagementClient struct { + // The internal transport-dependent client. + internalClient internalKeyManagementClient + + // The call options for this service. + CallOptions *KeyManagementCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *KeyManagementClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *KeyManagementClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *KeyManagementClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListKeyRings lists KeyRings. +func (c *KeyManagementClient) ListKeyRings(ctx context.Context, req *kmspb.ListKeyRingsRequest, opts ...gax.CallOption) *KeyRingIterator { + return c.internalClient.ListKeyRings(ctx, req, opts...) +} + +// ListCryptoKeys lists CryptoKeys. +func (c *KeyManagementClient) ListCryptoKeys(ctx context.Context, req *kmspb.ListCryptoKeysRequest, opts ...gax.CallOption) *CryptoKeyIterator { + return c.internalClient.ListCryptoKeys(ctx, req, opts...) +} + +// ListCryptoKeyVersions lists CryptoKeyVersions. +func (c *KeyManagementClient) ListCryptoKeyVersions(ctx context.Context, req *kmspb.ListCryptoKeyVersionsRequest, opts ...gax.CallOption) *CryptoKeyVersionIterator { + return c.internalClient.ListCryptoKeyVersions(ctx, req, opts...) +} + +// ListImportJobs lists ImportJobs. +func (c *KeyManagementClient) ListImportJobs(ctx context.Context, req *kmspb.ListImportJobsRequest, opts ...gax.CallOption) *ImportJobIterator { + return c.internalClient.ListImportJobs(ctx, req, opts...) +} + +// GetKeyRing returns metadata for a given KeyRing. +func (c *KeyManagementClient) GetKeyRing(ctx context.Context, req *kmspb.GetKeyRingRequest, opts ...gax.CallOption) (*kmspb.KeyRing, error) { + return c.internalClient.GetKeyRing(ctx, req, opts...) +} + +// GetCryptoKey returns metadata for a given CryptoKey, as +// well as its primary +// CryptoKeyVersion. +func (c *KeyManagementClient) GetCryptoKey(ctx context.Context, req *kmspb.GetCryptoKeyRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + return c.internalClient.GetCryptoKey(ctx, req, opts...) +} + +// GetCryptoKeyVersion returns metadata for a given +// CryptoKeyVersion. 
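+//
+// A minimal usage sketch (illustrative; the version name is a hypothetical
+// placeholder):
+//
+//	ckv, err := c.GetCryptoKeyVersion(ctx, &kmspb.GetCryptoKeyVersionRequest{
+//		Name: "projects/p/locations/us/keyRings/r/cryptoKeys/k/cryptoKeyVersions/1",
+//	})
+//	if err != nil {
+//		// TODO: handle error
+//	}
+//	_ = ckv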
+func (c *KeyManagementClient) GetCryptoKeyVersion(ctx context.Context, req *kmspb.GetCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + return c.internalClient.GetCryptoKeyVersion(ctx, req, opts...) +} + +// GetPublicKey returns the public key for the given +// CryptoKeyVersion. The +// CryptoKey.purpose must be +// ASYMMETRIC_SIGN +// or +// ASYMMETRIC_DECRYPT. +func (c *KeyManagementClient) GetPublicKey(ctx context.Context, req *kmspb.GetPublicKeyRequest, opts ...gax.CallOption) (*kmspb.PublicKey, error) { + return c.internalClient.GetPublicKey(ctx, req, opts...) +} + +// GetImportJob returns metadata for a given ImportJob. +func (c *KeyManagementClient) GetImportJob(ctx context.Context, req *kmspb.GetImportJobRequest, opts ...gax.CallOption) (*kmspb.ImportJob, error) { + return c.internalClient.GetImportJob(ctx, req, opts...) +} + +// CreateKeyRing create a new KeyRing in a given Project and +// Location. +func (c *KeyManagementClient) CreateKeyRing(ctx context.Context, req *kmspb.CreateKeyRingRequest, opts ...gax.CallOption) (*kmspb.KeyRing, error) { + return c.internalClient.CreateKeyRing(ctx, req, opts...) +} + +// CreateCryptoKey create a new CryptoKey within a +// KeyRing. +// +// CryptoKey.purpose and +// CryptoKey.version_template.algorithm +// are required. +func (c *KeyManagementClient) CreateCryptoKey(ctx context.Context, req *kmspb.CreateCryptoKeyRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + return c.internalClient.CreateCryptoKey(ctx, req, opts...) +} + +// CreateCryptoKeyVersion create a new CryptoKeyVersion in a +// CryptoKey. +// +// The server will assign the next sequential id. If unset, +// state will be set to +// ENABLED. +func (c *KeyManagementClient) CreateCryptoKeyVersion(ctx context.Context, req *kmspb.CreateCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + return c.internalClient.CreateCryptoKeyVersion(ctx, req, opts...) +} + +// ImportCryptoKeyVersion import wrapped key material into a +// CryptoKeyVersion. +// +// All requests must specify a CryptoKey. If +// a CryptoKeyVersion is additionally +// specified in the request, key material will be reimported into that +// version. Otherwise, a new version will be created, and will be assigned the +// next sequential id within the CryptoKey. +func (c *KeyManagementClient) ImportCryptoKeyVersion(ctx context.Context, req *kmspb.ImportCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + return c.internalClient.ImportCryptoKeyVersion(ctx, req, opts...) +} + +// CreateImportJob create a new ImportJob within a +// KeyRing. +// +// ImportJob.import_method is +// required. +func (c *KeyManagementClient) CreateImportJob(ctx context.Context, req *kmspb.CreateImportJobRequest, opts ...gax.CallOption) (*kmspb.ImportJob, error) { + return c.internalClient.CreateImportJob(ctx, req, opts...) +} + +// UpdateCryptoKey update a CryptoKey. +func (c *KeyManagementClient) UpdateCryptoKey(ctx context.Context, req *kmspb.UpdateCryptoKeyRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + return c.internalClient.UpdateCryptoKey(ctx, req, opts...) +} + +// UpdateCryptoKeyVersion update a CryptoKeyVersion's +// metadata. +// +// state may be changed between +// ENABLED +// and +// DISABLED +// using this method. See +// DestroyCryptoKeyVersion +// and +// RestoreCryptoKeyVersion +// to move between other states. 
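+//
+// A sketch of disabling a version this way (illustrative; the resource name
+// is a hypothetical placeholder and fieldmaskpb refers to
+// google.golang.org/protobuf/types/known/fieldmaskpb):
+//
+//	_, err := c.UpdateCryptoKeyVersion(ctx, &kmspb.UpdateCryptoKeyVersionRequest{
+//		CryptoKeyVersion: &kmspb.CryptoKeyVersion{
+//			Name:  "projects/p/locations/us/keyRings/r/cryptoKeys/k/cryptoKeyVersions/1",
+//			State: kmspb.CryptoKeyVersion_DISABLED,
+//		},
+//		UpdateMask: &fieldmaskpb.FieldMask{Paths: []string{"state"}},
+//	})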
+func (c *KeyManagementClient) UpdateCryptoKeyVersion(ctx context.Context, req *kmspb.UpdateCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + return c.internalClient.UpdateCryptoKeyVersion(ctx, req, opts...) +} + +// UpdateCryptoKeyPrimaryVersion update the version of a CryptoKey that +// will be used in +// Encrypt. +// +// Returns an error if called on a key whose purpose is not +// ENCRYPT_DECRYPT. +func (c *KeyManagementClient) UpdateCryptoKeyPrimaryVersion(ctx context.Context, req *kmspb.UpdateCryptoKeyPrimaryVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + return c.internalClient.UpdateCryptoKeyPrimaryVersion(ctx, req, opts...) +} + +// DestroyCryptoKeyVersion schedule a CryptoKeyVersion for +// destruction. +// +// Upon calling this method, +// CryptoKeyVersion.state will +// be set to +// DESTROY_SCHEDULED, +// and destroy_time will +// be set to the time +// destroy_scheduled_duration +// in the future. At that time, the +// state will automatically +// change to +// DESTROYED, +// and the key material will be irrevocably destroyed. +// +// Before the +// destroy_time is +// reached, +// RestoreCryptoKeyVersion +// may be called to reverse the process. +func (c *KeyManagementClient) DestroyCryptoKeyVersion(ctx context.Context, req *kmspb.DestroyCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + return c.internalClient.DestroyCryptoKeyVersion(ctx, req, opts...) +} + +// RestoreCryptoKeyVersion restore a CryptoKeyVersion in the +// DESTROY_SCHEDULED +// state. +// +// Upon restoration of the CryptoKeyVersion, +// state will be set to +// DISABLED, +// and destroy_time will +// be cleared. +func (c *KeyManagementClient) RestoreCryptoKeyVersion(ctx context.Context, req *kmspb.RestoreCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + return c.internalClient.RestoreCryptoKeyVersion(ctx, req, opts...) +} + +// Encrypt encrypts data, so that it can only be recovered by a call to +// Decrypt. The +// CryptoKey.purpose must be +// ENCRYPT_DECRYPT. +func (c *KeyManagementClient) Encrypt(ctx context.Context, req *kmspb.EncryptRequest, opts ...gax.CallOption) (*kmspb.EncryptResponse, error) { + return c.internalClient.Encrypt(ctx, req, opts...) +} + +// Decrypt decrypts data that was protected by +// Encrypt. The +// CryptoKey.purpose must be +// ENCRYPT_DECRYPT. +func (c *KeyManagementClient) Decrypt(ctx context.Context, req *kmspb.DecryptRequest, opts ...gax.CallOption) (*kmspb.DecryptResponse, error) { + return c.internalClient.Decrypt(ctx, req, opts...) +} + +// AsymmetricSign signs data using a CryptoKeyVersion +// with CryptoKey.purpose +// ASYMMETRIC_SIGN, producing a signature that can be verified with the public +// key retrieved from +// GetPublicKey. +func (c *KeyManagementClient) AsymmetricSign(ctx context.Context, req *kmspb.AsymmetricSignRequest, opts ...gax.CallOption) (*kmspb.AsymmetricSignResponse, error) { + return c.internalClient.AsymmetricSign(ctx, req, opts...) +} + +// AsymmetricDecrypt decrypts data that was encrypted with a public key retrieved from +// GetPublicKey +// corresponding to a CryptoKeyVersion +// with CryptoKey.purpose +// ASYMMETRIC_DECRYPT. +func (c *KeyManagementClient) AsymmetricDecrypt(ctx context.Context, req *kmspb.AsymmetricDecryptRequest, opts ...gax.CallOption) (*kmspb.AsymmetricDecryptResponse, error) { + return c.internalClient.AsymmetricDecrypt(ctx, req, opts...) 
+} + +// MacSign signs data using a CryptoKeyVersion +// with CryptoKey.purpose MAC, +// producing a tag that can be verified by another source with the same key. +func (c *KeyManagementClient) MacSign(ctx context.Context, req *kmspb.MacSignRequest, opts ...gax.CallOption) (*kmspb.MacSignResponse, error) { + return c.internalClient.MacSign(ctx, req, opts...) +} + +// MacVerify verifies MAC tag using a +// CryptoKeyVersion with +// CryptoKey.purpose MAC, and returns +// a response that indicates whether or not the verification was successful. +func (c *KeyManagementClient) MacVerify(ctx context.Context, req *kmspb.MacVerifyRequest, opts ...gax.CallOption) (*kmspb.MacVerifyResponse, error) { + return c.internalClient.MacVerify(ctx, req, opts...) +} + +// GenerateRandomBytes generate random bytes using the Cloud KMS randomness source in the provided +// location. +func (c *KeyManagementClient) GenerateRandomBytes(ctx context.Context, req *kmspb.GenerateRandomBytesRequest, opts ...gax.CallOption) (*kmspb.GenerateRandomBytesResponse, error) { + return c.internalClient.GenerateRandomBytes(ctx, req, opts...) +} + +// GetLocation gets information about a location. +func (c *KeyManagementClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) { + return c.internalClient.GetLocation(ctx, req, opts...) +} + +// ListLocations lists information about the supported locations for this service. +func (c *KeyManagementClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator { + return c.internalClient.ListLocations(ctx, req, opts...) +} + +// GetIamPolicy gets the access control policy for a resource. Returns an empty policy +// if the resource exists and does not have a policy set. +func (c *KeyManagementClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.GetIamPolicy(ctx, req, opts...) +} + +// SetIamPolicy sets the access control policy on the specified resource. Replaces +// any existing policy. +// +// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED +// errors. +func (c *KeyManagementClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.SetIamPolicy(ctx, req, opts...) +} + +// TestIamPermissions returns permissions that a caller has on the specified resource. If the +// resource does not exist, this will return an empty set of +// permissions, not a NOT_FOUND error. +// +// Note: This operation is designed to be used for building +// permission-aware UIs and command-line tools, not for authorization +// checking. This operation may “fail open” without warning. +func (c *KeyManagementClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + return c.internalClient.TestIamPermissions(ctx, req, opts...) +} + +// keyManagementGRPCClient is a client for interacting with Cloud Key Management Service (KMS) API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type keyManagementGRPCClient struct { + // Connection pool of gRPC connections to the service. 
+ connPool gtransport.ConnPool + + // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE + disableDeadlines bool + + // Points back to the CallOptions field of the containing KeyManagementClient + CallOptions **KeyManagementCallOptions + + // The gRPC API client. + keyManagementClient kmspb.KeyManagementServiceClient + + iamPolicyClient iampb.IAMPolicyClient + + locationsClient locationpb.LocationsClient + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewKeyManagementClient creates a new key management service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// # Google Cloud Key Management Service +// +// Manages cryptographic keys and operations using those keys. Implements a REST +// model with the following objects: +// +// KeyRing +// +// CryptoKey +// +// CryptoKeyVersion +// +// ImportJob +// +// If you are using manual gRPC libraries, see +// Using gRPC with Cloud KMS (at https://cloud.google.com/kms/docs/grpc). +func NewKeyManagementClient(ctx context.Context, opts ...option.ClientOption) (*KeyManagementClient, error) { + clientOpts := defaultKeyManagementGRPCClientOptions() + if newKeyManagementClientHook != nil { + hookOpts, err := newKeyManagementClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + disableDeadlines, err := checkDisableDeadlines() + if err != nil { + return nil, err + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := KeyManagementClient{CallOptions: defaultKeyManagementCallOptions()} + + c := &keyManagementGRPCClient{ + connPool: connPool, + disableDeadlines: disableDeadlines, + keyManagementClient: kmspb.NewKeyManagementServiceClient(connPool), + CallOptions: &client.CallOptions, + iamPolicyClient: iampb.NewIAMPolicyClient(connPool), + locationsClient: locationpb.NewLocationsClient(connPool), + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *keyManagementGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *keyManagementGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. 
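+//
+// A minimal lifecycle sketch (illustrative only):
+//
+//	ctx := context.Background()
+//	c, err := kms.NewKeyManagementClient(ctx)
+//	if err != nil {
+//		// TODO: handle error
+//	}
+//	defer c.Close()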
+func (c *keyManagementGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *keyManagementGRPCClient) ListKeyRings(ctx context.Context, req *kmspb.ListKeyRingsRequest, opts ...gax.CallOption) *KeyRingIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ListKeyRings[0:len((*c.CallOptions).ListKeyRings):len((*c.CallOptions).ListKeyRings)], opts...) + it := &KeyRingIterator{} + req = proto.Clone(req).(*kmspb.ListKeyRingsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.KeyRing, string, error) { + resp := &kmspb.ListKeyRingsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.ListKeyRings(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetKeyRings(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *keyManagementGRPCClient) ListCryptoKeys(ctx context.Context, req *kmspb.ListCryptoKeysRequest, opts ...gax.CallOption) *CryptoKeyIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ListCryptoKeys[0:len((*c.CallOptions).ListCryptoKeys):len((*c.CallOptions).ListCryptoKeys)], opts...) + it := &CryptoKeyIterator{} + req = proto.Clone(req).(*kmspb.ListCryptoKeysRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.CryptoKey, string, error) { + resp := &kmspb.ListCryptoKeysResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.ListCryptoKeys(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetCryptoKeys(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
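+		// Buffer the fetched page; the iterator's Next drains it.items one
+		// element at a time via the bufLen/takeBuf hooks registered below.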
+ return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *keyManagementGRPCClient) ListCryptoKeyVersions(ctx context.Context, req *kmspb.ListCryptoKeyVersionsRequest, opts ...gax.CallOption) *CryptoKeyVersionIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ListCryptoKeyVersions[0:len((*c.CallOptions).ListCryptoKeyVersions):len((*c.CallOptions).ListCryptoKeyVersions)], opts...) + it := &CryptoKeyVersionIterator{} + req = proto.Clone(req).(*kmspb.ListCryptoKeyVersionsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.CryptoKeyVersion, string, error) { + resp := &kmspb.ListCryptoKeyVersionsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.ListCryptoKeyVersions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetCryptoKeyVersions(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *keyManagementGRPCClient) ListImportJobs(ctx context.Context, req *kmspb.ListImportJobsRequest, opts ...gax.CallOption) *ImportJobIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ListImportJobs[0:len((*c.CallOptions).ListImportJobs):len((*c.CallOptions).ListImportJobs)], opts...) + it := &ImportJobIterator{} + req = proto.Clone(req).(*kmspb.ListImportJobsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.ImportJob, string, error) { + resp := &kmspb.ListImportJobsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.ListImportJobs(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetImportJobs(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
+ return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *keyManagementGRPCClient) GetKeyRing(ctx context.Context, req *kmspb.GetKeyRingRequest, opts ...gax.CallOption) (*kmspb.KeyRing, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetKeyRing[0:len((*c.CallOptions).GetKeyRing):len((*c.CallOptions).GetKeyRing)], opts...) + var resp *kmspb.KeyRing + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.GetKeyRing(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) GetCryptoKey(ctx context.Context, req *kmspb.GetCryptoKeyRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetCryptoKey[0:len((*c.CallOptions).GetCryptoKey):len((*c.CallOptions).GetCryptoKey)], opts...) + var resp *kmspb.CryptoKey + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.GetCryptoKey(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) GetCryptoKeyVersion(ctx context.Context, req *kmspb.GetCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetCryptoKeyVersion[0:len((*c.CallOptions).GetCryptoKeyVersion):len((*c.CallOptions).GetCryptoKeyVersion)], opts...) + var resp *kmspb.CryptoKeyVersion + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.GetCryptoKeyVersion(ctx, req, settings.GRPC...) + return err + }, opts...) 
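+	// gax.Invoke runs the stub call under the merged default and per-call
+	// retry settings assembled above.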
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) GetPublicKey(ctx context.Context, req *kmspb.GetPublicKeyRequest, opts ...gax.CallOption) (*kmspb.PublicKey, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetPublicKey[0:len((*c.CallOptions).GetPublicKey):len((*c.CallOptions).GetPublicKey)], opts...) + var resp *kmspb.PublicKey + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.GetPublicKey(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) GetImportJob(ctx context.Context, req *kmspb.GetImportJobRequest, opts ...gax.CallOption) (*kmspb.ImportJob, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetImportJob[0:len((*c.CallOptions).GetImportJob):len((*c.CallOptions).GetImportJob)], opts...) + var resp *kmspb.ImportJob + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.GetImportJob(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) CreateKeyRing(ctx context.Context, req *kmspb.CreateKeyRingRequest, opts ...gax.CallOption) (*kmspb.KeyRing, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).CreateKeyRing[0:len((*c.CallOptions).CreateKeyRing):len((*c.CallOptions).CreateKeyRing)], opts...) + var resp *kmspb.KeyRing + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.CreateKeyRing(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) CreateCryptoKey(ctx context.Context, req *kmspb.CreateCryptoKeyRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).CreateCryptoKey[0:len((*c.CallOptions).CreateCryptoKey):len((*c.CallOptions).CreateCryptoKey)], opts...) 
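+	// The three-index slice pins capacity so appending per-call options
+	// cannot clobber the shared default option slice.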
+ var resp *kmspb.CryptoKey + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.CreateCryptoKey(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) CreateCryptoKeyVersion(ctx context.Context, req *kmspb.CreateCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).CreateCryptoKeyVersion[0:len((*c.CallOptions).CreateCryptoKeyVersion):len((*c.CallOptions).CreateCryptoKeyVersion)], opts...) + var resp *kmspb.CryptoKeyVersion + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.CreateCryptoKeyVersion(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) ImportCryptoKeyVersion(ctx context.Context, req *kmspb.ImportCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ImportCryptoKeyVersion[0:len((*c.CallOptions).ImportCryptoKeyVersion):len((*c.CallOptions).ImportCryptoKeyVersion)], opts...) + var resp *kmspb.CryptoKeyVersion + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.ImportCryptoKeyVersion(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) CreateImportJob(ctx context.Context, req *kmspb.CreateImportJobRequest, opts ...gax.CallOption) (*kmspb.ImportJob, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).CreateImportJob[0:len((*c.CallOptions).CreateImportJob):len((*c.CallOptions).CreateImportJob)], opts...) + var resp *kmspb.ImportJob + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.CreateImportJob(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) UpdateCryptoKey(ctx context.Context, req *kmspb.UpdateCryptoKeyRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "crypto_key.name", url.QueryEscape(req.GetCryptoKey().GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).UpdateCryptoKey[0:len((*c.CallOptions).UpdateCryptoKey):len((*c.CallOptions).UpdateCryptoKey)], opts...) + var resp *kmspb.CryptoKey + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.UpdateCryptoKey(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) UpdateCryptoKeyVersion(ctx context.Context, req *kmspb.UpdateCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "crypto_key_version.name", url.QueryEscape(req.GetCryptoKeyVersion().GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).UpdateCryptoKeyVersion[0:len((*c.CallOptions).UpdateCryptoKeyVersion):len((*c.CallOptions).UpdateCryptoKeyVersion)], opts...) + var resp *kmspb.CryptoKeyVersion + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.UpdateCryptoKeyVersion(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) UpdateCryptoKeyPrimaryVersion(ctx context.Context, req *kmspb.UpdateCryptoKeyPrimaryVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).UpdateCryptoKeyPrimaryVersion[0:len((*c.CallOptions).UpdateCryptoKeyPrimaryVersion):len((*c.CallOptions).UpdateCryptoKeyPrimaryVersion)], opts...) + var resp *kmspb.CryptoKey + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.UpdateCryptoKeyPrimaryVersion(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) DestroyCryptoKeyVersion(ctx context.Context, req *kmspb.DestroyCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).DestroyCryptoKeyVersion[0:len((*c.CallOptions).DestroyCryptoKeyVersion):len((*c.CallOptions).DestroyCryptoKeyVersion)], opts...) + var resp *kmspb.CryptoKeyVersion + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.DestroyCryptoKeyVersion(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) RestoreCryptoKeyVersion(ctx context.Context, req *kmspb.RestoreCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).RestoreCryptoKeyVersion[0:len((*c.CallOptions).RestoreCryptoKeyVersion):len((*c.CallOptions).RestoreCryptoKeyVersion)], opts...) + var resp *kmspb.CryptoKeyVersion + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.RestoreCryptoKeyVersion(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) Encrypt(ctx context.Context, req *kmspb.EncryptRequest, opts ...gax.CallOption) (*kmspb.EncryptResponse, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).Encrypt[0:len((*c.CallOptions).Encrypt):len((*c.CallOptions).Encrypt)], opts...) + var resp *kmspb.EncryptResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.Encrypt(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) Decrypt(ctx context.Context, req *kmspb.DecryptRequest, opts ...gax.CallOption) (*kmspb.DecryptResponse, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).Decrypt[0:len((*c.CallOptions).Decrypt):len((*c.CallOptions).Decrypt)], opts...) 
+ var resp *kmspb.DecryptResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.Decrypt(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) AsymmetricSign(ctx context.Context, req *kmspb.AsymmetricSignRequest, opts ...gax.CallOption) (*kmspb.AsymmetricSignResponse, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).AsymmetricSign[0:len((*c.CallOptions).AsymmetricSign):len((*c.CallOptions).AsymmetricSign)], opts...) + var resp *kmspb.AsymmetricSignResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.AsymmetricSign(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) AsymmetricDecrypt(ctx context.Context, req *kmspb.AsymmetricDecryptRequest, opts ...gax.CallOption) (*kmspb.AsymmetricDecryptResponse, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).AsymmetricDecrypt[0:len((*c.CallOptions).AsymmetricDecrypt):len((*c.CallOptions).AsymmetricDecrypt)], opts...) + var resp *kmspb.AsymmetricDecryptResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.AsymmetricDecrypt(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) MacSign(ctx context.Context, req *kmspb.MacSignRequest, opts ...gax.CallOption) (*kmspb.MacSignResponse, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).MacSign[0:len((*c.CallOptions).MacSign):len((*c.CallOptions).MacSign)], opts...) + var resp *kmspb.MacSignResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.MacSign(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) MacVerify(ctx context.Context, req *kmspb.MacVerifyRequest, opts ...gax.CallOption) (*kmspb.MacVerifyResponse, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).MacVerify[0:len((*c.CallOptions).MacVerify):len((*c.CallOptions).MacVerify)], opts...) + var resp *kmspb.MacVerifyResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.MacVerify(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) GenerateRandomBytes(ctx context.Context, req *kmspb.GenerateRandomBytesRequest, opts ...gax.CallOption) (*kmspb.GenerateRandomBytesResponse, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "location", url.QueryEscape(req.GetLocation()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GenerateRandomBytes[0:len((*c.CallOptions).GenerateRandomBytes):len((*c.CallOptions).GenerateRandomBytes)], opts...) + var resp *kmspb.GenerateRandomBytesResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.GenerateRandomBytes(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetLocation[0:len((*c.CallOptions).GetLocation):len((*c.CallOptions).GetLocation)], opts...) + var resp *locationpb.Location + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.locationsClient.GetLocation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ListLocations[0:len((*c.CallOptions).ListLocations):len((*c.CallOptions).ListLocations)], opts...) 
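+	// What follows is the standard GAPIC pagination wiring: InternalFetch
+	// issues one ListLocations RPC per page, and iterator.NewPageInfo
+	// drives it through the fetch closure, buffering items until the
+	// iterator's Next drains them.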
+ it := &LocationIterator{} + req = proto.Clone(req).(*locationpb.ListLocationsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*locationpb.Location, string, error) { + resp := &locationpb.ListLocationsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.locationsClient.ListLocations(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetLocations(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *keyManagementGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CryptoKeyIterator manages a stream of *kmspb.CryptoKey. 
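+//
+// A minimal usage sketch, assuming client is a *kms.KeyManagementClient
+// and the parent name is hypothetical (iterator.Done comes from
+// google.golang.org/api/iterator):
+//
+//	it := client.ListCryptoKeys(ctx, &kmspb.ListCryptoKeysRequest{
+//		Parent: "projects/my-project/locations/global/keyRings/my-ring",
+//	})
+//	for {
+//		key, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		_ = key // use the CryptoKey
+//	}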
+type CryptoKeyIterator struct { + items []*kmspb.CryptoKey + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*kmspb.CryptoKey, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *CryptoKeyIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *CryptoKeyIterator) Next() (*kmspb.CryptoKey, error) { + var item *kmspb.CryptoKey + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *CryptoKeyIterator) bufLen() int { + return len(it.items) +} + +func (it *CryptoKeyIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// CryptoKeyVersionIterator manages a stream of *kmspb.CryptoKeyVersion. +type CryptoKeyVersionIterator struct { + items []*kmspb.CryptoKeyVersion + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*kmspb.CryptoKeyVersion, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *CryptoKeyVersionIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *CryptoKeyVersionIterator) Next() (*kmspb.CryptoKeyVersion, error) { + var item *kmspb.CryptoKeyVersion + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *CryptoKeyVersionIterator) bufLen() int { + return len(it.items) +} + +func (it *CryptoKeyVersionIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// ImportJobIterator manages a stream of *kmspb.ImportJob. +type ImportJobIterator struct { + items []*kmspb.ImportJob + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. 
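+	// (For this iterator, the raw page is a *kmspb.ListImportJobsResponse.)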
+ Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*kmspb.ImportJob, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ImportJobIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *ImportJobIterator) Next() (*kmspb.ImportJob, error) { + var item *kmspb.ImportJob + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ImportJobIterator) bufLen() int { + return len(it.items) +} + +func (it *ImportJobIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// KeyRingIterator manages a stream of *kmspb.KeyRing. +type KeyRingIterator struct { + items []*kmspb.KeyRing + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*kmspb.KeyRing, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *KeyRingIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *KeyRingIterator) Next() (*kmspb.KeyRing, error) { + var item *kmspb.KeyRing + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *KeyRingIterator) bufLen() int { + return len(it.items) +} + +func (it *KeyRingIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go new file mode 100644 index 00000000000..2cf0cd4f4f1 --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go @@ -0,0 +1,1301 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.9 +// source: google/cloud/kms/v1/ekm_service.proto + +package kmspb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Request message for +// [EkmService.ListEkmConnections][google.cloud.kms.v1.EkmService.ListEkmConnections]. +type ListEkmConnectionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the location associated with the + // [EkmConnections][google.cloud.kms.v1.EkmConnection] to list, in the format + // `projects/*/locations/*`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. Optional limit on the number of + // [EkmConnections][google.cloud.kms.v1.EkmConnection] to include in the + // response. Further [EkmConnections][google.cloud.kms.v1.EkmConnection] can + // subsequently be obtained by including the + // [ListEkmConnectionsResponse.next_page_token][google.cloud.kms.v1.ListEkmConnectionsResponse.next_page_token] + // in a subsequent request. If unspecified, the server will pick an + // appropriate default. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional. Optional pagination token, returned earlier via + // [ListEkmConnectionsResponse.next_page_token][google.cloud.kms.v1.ListEkmConnectionsResponse.next_page_token]. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // Optional. Only include resources that match the filter in the response. For + // more information, see + // [Sorting and filtering list + // results](https://cloud.google.com/kms/docs/sorting-and-filtering). + Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` + // Optional. Specify how the results should be sorted. If not specified, the + // results will be sorted in the default order. For more information, see + // [Sorting and filtering list + // results](https://cloud.google.com/kms/docs/sorting-and-filtering). 
+ OrderBy string `protobuf:"bytes,5,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` +} + +func (x *ListEkmConnectionsRequest) Reset() { + *x = ListEkmConnectionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListEkmConnectionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListEkmConnectionsRequest) ProtoMessage() {} + +func (x *ListEkmConnectionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListEkmConnectionsRequest.ProtoReflect.Descriptor instead. +func (*ListEkmConnectionsRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{0} +} + +func (x *ListEkmConnectionsRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListEkmConnectionsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListEkmConnectionsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListEkmConnectionsRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListEkmConnectionsRequest) GetOrderBy() string { + if x != nil { + return x.OrderBy + } + return "" +} + +// Response message for +// [EkmService.ListEkmConnections][google.cloud.kms.v1.EkmService.ListEkmConnections]. +type ListEkmConnectionsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of [EkmConnections][google.cloud.kms.v1.EkmConnection]. + EkmConnections []*EkmConnection `protobuf:"bytes,1,rep,name=ekm_connections,json=ekmConnections,proto3" json:"ekm_connections,omitempty"` + // A token to retrieve next page of results. Pass this value in + // [ListEkmConnectionsRequest.page_token][google.cloud.kms.v1.ListEkmConnectionsRequest.page_token] + // to retrieve the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of [EkmConnections][google.cloud.kms.v1.EkmConnection] + // that matched the query. 
+ TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` +} + +func (x *ListEkmConnectionsResponse) Reset() { + *x = ListEkmConnectionsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListEkmConnectionsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListEkmConnectionsResponse) ProtoMessage() {} + +func (x *ListEkmConnectionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListEkmConnectionsResponse.ProtoReflect.Descriptor instead. +func (*ListEkmConnectionsResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListEkmConnectionsResponse) GetEkmConnections() []*EkmConnection { + if x != nil { + return x.EkmConnections + } + return nil +} + +func (x *ListEkmConnectionsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListEkmConnectionsResponse) GetTotalSize() int32 { + if x != nil { + return x.TotalSize + } + return 0 +} + +// Request message for +// [EkmService.GetEkmConnection][google.cloud.kms.v1.EkmService.GetEkmConnection]. +type GetEkmConnectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.EkmConnection.name] of the + // [EkmConnection][google.cloud.kms.v1.EkmConnection] to get. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetEkmConnectionRequest) Reset() { + *x = GetEkmConnectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEkmConnectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEkmConnectionRequest) ProtoMessage() {} + +func (x *GetEkmConnectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEkmConnectionRequest.ProtoReflect.Descriptor instead. +func (*GetEkmConnectionRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{2} +} + +func (x *GetEkmConnectionRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for +// [EkmService.CreateEkmConnection][google.cloud.kms.v1.EkmService.CreateEkmConnection]. +type CreateEkmConnectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the location associated with the + // [EkmConnection][google.cloud.kms.v1.EkmConnection], in the format + // `projects/*/locations/*`. 
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. It must be unique within a location and match the regular + // expression `[a-zA-Z0-9_-]{1,63}`. + EkmConnectionId string `protobuf:"bytes,2,opt,name=ekm_connection_id,json=ekmConnectionId,proto3" json:"ekm_connection_id,omitempty"` + // Required. An [EkmConnection][google.cloud.kms.v1.EkmConnection] with + // initial field values. + EkmConnection *EkmConnection `protobuf:"bytes,3,opt,name=ekm_connection,json=ekmConnection,proto3" json:"ekm_connection,omitempty"` +} + +func (x *CreateEkmConnectionRequest) Reset() { + *x = CreateEkmConnectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateEkmConnectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateEkmConnectionRequest) ProtoMessage() {} + +func (x *CreateEkmConnectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateEkmConnectionRequest.ProtoReflect.Descriptor instead. +func (*CreateEkmConnectionRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{3} +} + +func (x *CreateEkmConnectionRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateEkmConnectionRequest) GetEkmConnectionId() string { + if x != nil { + return x.EkmConnectionId + } + return "" +} + +func (x *CreateEkmConnectionRequest) GetEkmConnection() *EkmConnection { + if x != nil { + return x.EkmConnection + } + return nil +} + +// Request message for +// [EkmService.UpdateEkmConnection][google.cloud.kms.v1.EkmService.UpdateEkmConnection]. +type UpdateEkmConnectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. [EkmConnection][google.cloud.kms.v1.EkmConnection] with updated + // values. + EkmConnection *EkmConnection `protobuf:"bytes,1,opt,name=ekm_connection,json=ekmConnection,proto3" json:"ekm_connection,omitempty"` + // Required. List of fields to be updated in this request. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateEkmConnectionRequest) Reset() { + *x = UpdateEkmConnectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateEkmConnectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateEkmConnectionRequest) ProtoMessage() {} + +func (x *UpdateEkmConnectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateEkmConnectionRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateEkmConnectionRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{4} +} + +func (x *UpdateEkmConnectionRequest) GetEkmConnection() *EkmConnection { + if x != nil { + return x.EkmConnection + } + return nil +} + +func (x *UpdateEkmConnectionRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// A [Certificate][google.cloud.kms.v1.Certificate] represents an X.509 +// certificate used to authenticate HTTPS connections to EKM replicas. +type Certificate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The raw certificate bytes in DER format. + RawDer []byte `protobuf:"bytes,1,opt,name=raw_der,json=rawDer,proto3" json:"raw_der,omitempty"` + // Output only. True if the certificate was parsed successfully. + Parsed bool `protobuf:"varint,2,opt,name=parsed,proto3" json:"parsed,omitempty"` + // Output only. The issuer distinguished name in RFC 2253 format. Only present + // if [parsed][google.cloud.kms.v1.Certificate.parsed] is true. + Issuer string `protobuf:"bytes,3,opt,name=issuer,proto3" json:"issuer,omitempty"` + // Output only. The subject distinguished name in RFC 2253 format. Only + // present if [parsed][google.cloud.kms.v1.Certificate.parsed] is true. + Subject string `protobuf:"bytes,4,opt,name=subject,proto3" json:"subject,omitempty"` + // Output only. The subject Alternative DNS names. Only present if + // [parsed][google.cloud.kms.v1.Certificate.parsed] is true. + SubjectAlternativeDnsNames []string `protobuf:"bytes,5,rep,name=subject_alternative_dns_names,json=subjectAlternativeDnsNames,proto3" json:"subject_alternative_dns_names,omitempty"` + // Output only. The certificate is not valid before this time. Only present if + // [parsed][google.cloud.kms.v1.Certificate.parsed] is true. + NotBeforeTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=not_before_time,json=notBeforeTime,proto3" json:"not_before_time,omitempty"` + // Output only. The certificate is not valid after this time. Only present if + // [parsed][google.cloud.kms.v1.Certificate.parsed] is true. + NotAfterTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=not_after_time,json=notAfterTime,proto3" json:"not_after_time,omitempty"` + // Output only. The certificate serial number as a hex string. Only present if + // [parsed][google.cloud.kms.v1.Certificate.parsed] is true. + SerialNumber string `protobuf:"bytes,8,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"` + // Output only. The SHA-256 certificate fingerprint as a hex string. Only + // present if [parsed][google.cloud.kms.v1.Certificate.parsed] is true. 
+ Sha256Fingerprint string `protobuf:"bytes,9,opt,name=sha256_fingerprint,json=sha256Fingerprint,proto3" json:"sha256_fingerprint,omitempty"` +} + +func (x *Certificate) Reset() { + *x = Certificate{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Certificate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Certificate) ProtoMessage() {} + +func (x *Certificate) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Certificate.ProtoReflect.Descriptor instead. +func (*Certificate) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{5} +} + +func (x *Certificate) GetRawDer() []byte { + if x != nil { + return x.RawDer + } + return nil +} + +func (x *Certificate) GetParsed() bool { + if x != nil { + return x.Parsed + } + return false +} + +func (x *Certificate) GetIssuer() string { + if x != nil { + return x.Issuer + } + return "" +} + +func (x *Certificate) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +func (x *Certificate) GetSubjectAlternativeDnsNames() []string { + if x != nil { + return x.SubjectAlternativeDnsNames + } + return nil +} + +func (x *Certificate) GetNotBeforeTime() *timestamppb.Timestamp { + if x != nil { + return x.NotBeforeTime + } + return nil +} + +func (x *Certificate) GetNotAfterTime() *timestamppb.Timestamp { + if x != nil { + return x.NotAfterTime + } + return nil +} + +func (x *Certificate) GetSerialNumber() string { + if x != nil { + return x.SerialNumber + } + return "" +} + +func (x *Certificate) GetSha256Fingerprint() string { + if x != nil { + return x.Sha256Fingerprint + } + return "" +} + +// An [EkmConnection][google.cloud.kms.v1.EkmConnection] represents an +// individual EKM connection. It can be used for creating +// [CryptoKeys][google.cloud.kms.v1.CryptoKey] and +// [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] with a +// [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of +// [EXTERNAL_VPC][CryptoKeyVersion.ProtectionLevel.EXTERNAL_VPC], as well as +// performing cryptographic operations using keys created within the +// [EkmConnection][google.cloud.kms.v1.EkmConnection]. +type EkmConnection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The resource name for the + // [EkmConnection][google.cloud.kms.v1.EkmConnection] in the format + // `projects/*/locations/*/ekmConnections/*`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Output only. The time at which the + // [EkmConnection][google.cloud.kms.v1.EkmConnection] was created. + CreateTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // A list of + // [ServiceResolvers][google.cloud.kms.v1.EkmConnection.ServiceResolver] where + // the EKM can be reached. There should be one ServiceResolver per EKM + // replica. Currently, only a single + // [ServiceResolver][google.cloud.kms.v1.EkmConnection.ServiceResolver] is + // supported. 
+ ServiceResolvers []*EkmConnection_ServiceResolver `protobuf:"bytes,3,rep,name=service_resolvers,json=serviceResolvers,proto3" json:"service_resolvers,omitempty"` + // Optional. Etag of the currently stored + // [EkmConnection][google.cloud.kms.v1.EkmConnection]. + Etag string `protobuf:"bytes,5,opt,name=etag,proto3" json:"etag,omitempty"` +} + +func (x *EkmConnection) Reset() { + *x = EkmConnection{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EkmConnection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EkmConnection) ProtoMessage() {} + +func (x *EkmConnection) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EkmConnection.ProtoReflect.Descriptor instead. +func (*EkmConnection) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{6} +} + +func (x *EkmConnection) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *EkmConnection) GetCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.CreateTime + } + return nil +} + +func (x *EkmConnection) GetServiceResolvers() []*EkmConnection_ServiceResolver { + if x != nil { + return x.ServiceResolvers + } + return nil +} + +func (x *EkmConnection) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +// A [ServiceResolver][google.cloud.kms.v1.EkmConnection.ServiceResolver] +// represents an EKM replica that can be reached within an +// [EkmConnection][google.cloud.kms.v1.EkmConnection]. +type EkmConnection_ServiceResolver struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the Service Directory service pointing to + // an EKM replica, in the format + // `projects/*/locations/*/namespaces/*/services/*`. + ServiceDirectoryService string `protobuf:"bytes,1,opt,name=service_directory_service,json=serviceDirectoryService,proto3" json:"service_directory_service,omitempty"` + // Optional. The filter applied to the endpoints of the resolved service. If + // no filter is specified, all endpoints will be considered. An endpoint + // will be chosen arbitrarily from the filtered list for each request. + // + // For endpoint filter syntax and examples, see + // https://cloud.google.com/service-directory/docs/reference/rpc/google.cloud.servicedirectory.v1#resolveservicerequest. + EndpointFilter string `protobuf:"bytes,2,opt,name=endpoint_filter,json=endpointFilter,proto3" json:"endpoint_filter,omitempty"` + // Required. The hostname of the EKM replica used at TLS and HTTP layers. + Hostname string `protobuf:"bytes,3,opt,name=hostname,proto3" json:"hostname,omitempty"` + // Required. A list of leaf server certificates used to authenticate HTTPS + // connections to the EKM replica. Currently, a maximum of 10 + // [Certificate][google.cloud.kms.v1.Certificate] is supported. 
+ ServerCertificates []*Certificate `protobuf:"bytes,4,rep,name=server_certificates,json=serverCertificates,proto3" json:"server_certificates,omitempty"` +} + +func (x *EkmConnection_ServiceResolver) Reset() { + *x = EkmConnection_ServiceResolver{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EkmConnection_ServiceResolver) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EkmConnection_ServiceResolver) ProtoMessage() {} + +func (x *EkmConnection_ServiceResolver) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EkmConnection_ServiceResolver.ProtoReflect.Descriptor instead. +func (*EkmConnection_ServiceResolver) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *EkmConnection_ServiceResolver) GetServiceDirectoryService() string { + if x != nil { + return x.ServiceDirectoryService + } + return "" +} + +func (x *EkmConnection_ServiceResolver) GetEndpointFilter() string { + if x != nil { + return x.EndpointFilter + } + return "" +} + +func (x *EkmConnection_ServiceResolver) GetHostname() string { + if x != nil { + return x.Hostname + } + return "" +} + +func (x *EkmConnection_ServiceResolver) GetServerCertificates() []*Certificate { + if x != nil { + return x.ServerCertificates + } + return nil +} + +var File_google_cloud_kms_v1_ekm_service_proto protoreflect.FileDescriptor + +var file_google_cloud_kms_v1_ekm_service_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6b, + 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x6b, 0x6d, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0xe1, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
+ 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, + 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, + 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, + 0x62, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x6f, + 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x22, 0xb0, 0x01, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x45, + 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0f, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, + 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x5c, 0x0a, 0x17, 0x47, 0x65, 0x74, + 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xe0, 0x01, 0x0a, 0x1a, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x2f, 0x0a, 0x11, 0x65, 0x6b, 0x6d, + 0x5f, 0x63, 0x6f, 0x6e, 
0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x4e, 0x0a, 0x0e, 0x65, 0x6b, + 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x65, 0x6b, 0x6d, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xae, 0x01, 0x0a, 0x1a, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0e, 0x65, 0x6b, 0x6d, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x65, 0x6b, 0x6d, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xba, 0x03, 0x0a, 0x0b, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x0a, 0x07, 0x72, + 0x61, 0x77, 0x5f, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x06, 0x72, 0x61, 0x77, 0x44, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x06, 0x70, 0x61, 0x72, + 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x06, + 0x70, 0x61, 0x72, 0x73, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x06, 0x69, 0x73, 0x73, + 0x75, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x12, 0x46, 0x0a, 0x1d, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x6e, 0x73, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x1a, + 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x44, 0x6e, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x47, 0x0a, 0x0f, 0x6e, 0x6f, + 0x74, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x12, 0x45, 0x0a, 0x0e, 0x6e, 
0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6e, 0x6f, + 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x12, 0x32, 0x0a, 0x12, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x66, + 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x11, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x46, 0x69, 0x6e, + 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x22, 0x81, 0x05, 0x0a, 0x0d, 0x45, 0x6b, 0x6d, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x5f, 0x0a, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, + 0x6c, 0x76, 0x65, 0x72, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, + 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x73, 0x12, 0x17, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x1a, + 0xa5, 0x02, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, + 0x76, 0x65, 0x72, 0x12, 0x6b, 0x0a, 0x19, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2f, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x29, 0x0a, 0x27, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x2c, 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, + 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1f, + 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 
0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x56, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x3a, 0x73, 0xea, 0x41, 0x70, 0x0a, 0x25, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x47, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x65, 0x6b, 0x6d, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x32, 0xb1, 0x07, 0x0a, + 0x0a, 0x45, 0x6b, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xba, 0x01, 0x0a, 0x12, + 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6b, 0x6d, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6b, 0x6d, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x43, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76, 0x31, + 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, + 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xda, + 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0xa7, 0x01, 0x0a, 0x10, 0x47, 0x65, 0x74, + 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x41, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, + 
0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0xe0, 0x01, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x44, 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x0e, 0x65, 0x6b, 0x6d, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0xda, 0x41, 0x27, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x2c, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0xe2, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x76, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x53, 0x32, 0x41, 0x2f, 0x76, 0x31, 0x2f, + 0x7b, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 0x6d, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x0e, 0x65, + 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0xda, 0x41, 0x1a, + 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0x74, 0xca, 0x41, 0x17, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x57, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, + 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, + 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x61, 
0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, + 0x42, 0x92, 0x02, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x0f, 0x45, 0x6b, + 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x36, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6b, 0x6d, 0x73, + 0x2f, 0x76, 0x31, 0x3b, 0x6b, 0x6d, 0x73, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, + 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, + 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, 0xea, 0x41, 0x7c, 0x0a, 0x27, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x12, 0x51, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x7d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_cloud_kms_v1_ekm_service_proto_rawDescOnce sync.Once + file_google_cloud_kms_v1_ekm_service_proto_rawDescData = file_google_cloud_kms_v1_ekm_service_proto_rawDesc +) + +func file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP() []byte { + file_google_cloud_kms_v1_ekm_service_proto_rawDescOnce.Do(func() { + file_google_cloud_kms_v1_ekm_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_cloud_kms_v1_ekm_service_proto_rawDescData) + }) + return file_google_cloud_kms_v1_ekm_service_proto_rawDescData +} + +var file_google_cloud_kms_v1_ekm_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_google_cloud_kms_v1_ekm_service_proto_goTypes = []interface{}{ + (*ListEkmConnectionsRequest)(nil), // 0: google.cloud.kms.v1.ListEkmConnectionsRequest + (*ListEkmConnectionsResponse)(nil), // 1: google.cloud.kms.v1.ListEkmConnectionsResponse + (*GetEkmConnectionRequest)(nil), // 2: google.cloud.kms.v1.GetEkmConnectionRequest + (*CreateEkmConnectionRequest)(nil), // 3: google.cloud.kms.v1.CreateEkmConnectionRequest + (*UpdateEkmConnectionRequest)(nil), // 4: google.cloud.kms.v1.UpdateEkmConnectionRequest + (*Certificate)(nil), // 5: google.cloud.kms.v1.Certificate + (*EkmConnection)(nil), // 6: google.cloud.kms.v1.EkmConnection + (*EkmConnection_ServiceResolver)(nil), // 7: google.cloud.kms.v1.EkmConnection.ServiceResolver + (*fieldmaskpb.FieldMask)(nil), // 8: google.protobuf.FieldMask + (*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp +} +var file_google_cloud_kms_v1_ekm_service_proto_depIdxs = []int32{ + 6, // 0: google.cloud.kms.v1.ListEkmConnectionsResponse.ekm_connections:type_name -> google.cloud.kms.v1.EkmConnection + 6, // 1: 
google.cloud.kms.v1.CreateEkmConnectionRequest.ekm_connection:type_name -> google.cloud.kms.v1.EkmConnection + 6, // 2: google.cloud.kms.v1.UpdateEkmConnectionRequest.ekm_connection:type_name -> google.cloud.kms.v1.EkmConnection + 8, // 3: google.cloud.kms.v1.UpdateEkmConnectionRequest.update_mask:type_name -> google.protobuf.FieldMask + 9, // 4: google.cloud.kms.v1.Certificate.not_before_time:type_name -> google.protobuf.Timestamp + 9, // 5: google.cloud.kms.v1.Certificate.not_after_time:type_name -> google.protobuf.Timestamp + 9, // 6: google.cloud.kms.v1.EkmConnection.create_time:type_name -> google.protobuf.Timestamp + 7, // 7: google.cloud.kms.v1.EkmConnection.service_resolvers:type_name -> google.cloud.kms.v1.EkmConnection.ServiceResolver + 5, // 8: google.cloud.kms.v1.EkmConnection.ServiceResolver.server_certificates:type_name -> google.cloud.kms.v1.Certificate + 0, // 9: google.cloud.kms.v1.EkmService.ListEkmConnections:input_type -> google.cloud.kms.v1.ListEkmConnectionsRequest + 2, // 10: google.cloud.kms.v1.EkmService.GetEkmConnection:input_type -> google.cloud.kms.v1.GetEkmConnectionRequest + 3, // 11: google.cloud.kms.v1.EkmService.CreateEkmConnection:input_type -> google.cloud.kms.v1.CreateEkmConnectionRequest + 4, // 12: google.cloud.kms.v1.EkmService.UpdateEkmConnection:input_type -> google.cloud.kms.v1.UpdateEkmConnectionRequest + 1, // 13: google.cloud.kms.v1.EkmService.ListEkmConnections:output_type -> google.cloud.kms.v1.ListEkmConnectionsResponse + 6, // 14: google.cloud.kms.v1.EkmService.GetEkmConnection:output_type -> google.cloud.kms.v1.EkmConnection + 6, // 15: google.cloud.kms.v1.EkmService.CreateEkmConnection:output_type -> google.cloud.kms.v1.EkmConnection + 6, // 16: google.cloud.kms.v1.EkmService.UpdateEkmConnection:output_type -> google.cloud.kms.v1.EkmConnection + 13, // [13:17] is the sub-list for method output_type + 9, // [9:13] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_google_cloud_kms_v1_ekm_service_proto_init() } +func file_google_cloud_kms_v1_ekm_service_proto_init() { + if File_google_cloud_kms_v1_ekm_service_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListEkmConnectionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListEkmConnectionsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetEkmConnectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateEkmConnectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[4].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*UpdateEkmConnectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Certificate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EkmConnection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EkmConnection_ServiceResolver); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_cloud_kms_v1_ekm_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_cloud_kms_v1_ekm_service_proto_goTypes, + DependencyIndexes: file_google_cloud_kms_v1_ekm_service_proto_depIdxs, + MessageInfos: file_google_cloud_kms_v1_ekm_service_proto_msgTypes, + }.Build() + File_google_cloud_kms_v1_ekm_service_proto = out.File + file_google_cloud_kms_v1_ekm_service_proto_rawDesc = nil + file_google_cloud_kms_v1_ekm_service_proto_goTypes = nil + file_google_cloud_kms_v1_ekm_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// EkmServiceClient is the client API for EkmService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type EkmServiceClient interface { + // Lists [EkmConnections][google.cloud.kms.v1.EkmConnection]. + ListEkmConnections(ctx context.Context, in *ListEkmConnectionsRequest, opts ...grpc.CallOption) (*ListEkmConnectionsResponse, error) + // Returns metadata for a given + // [EkmConnection][google.cloud.kms.v1.EkmConnection]. + GetEkmConnection(ctx context.Context, in *GetEkmConnectionRequest, opts ...grpc.CallOption) (*EkmConnection, error) + // Creates a new [EkmConnection][google.cloud.kms.v1.EkmConnection] in a given + // Project and Location. + CreateEkmConnection(ctx context.Context, in *CreateEkmConnectionRequest, opts ...grpc.CallOption) (*EkmConnection, error) + // Updates an [EkmConnection][google.cloud.kms.v1.EkmConnection]'s metadata. 
+ UpdateEkmConnection(ctx context.Context, in *UpdateEkmConnectionRequest, opts ...grpc.CallOption) (*EkmConnection, error) +} + +type ekmServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewEkmServiceClient(cc grpc.ClientConnInterface) EkmServiceClient { + return &ekmServiceClient{cc} +} + +func (c *ekmServiceClient) ListEkmConnections(ctx context.Context, in *ListEkmConnectionsRequest, opts ...grpc.CallOption) (*ListEkmConnectionsResponse, error) { + out := new(ListEkmConnectionsResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.EkmService/ListEkmConnections", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ekmServiceClient) GetEkmConnection(ctx context.Context, in *GetEkmConnectionRequest, opts ...grpc.CallOption) (*EkmConnection, error) { + out := new(EkmConnection) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.EkmService/GetEkmConnection", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ekmServiceClient) CreateEkmConnection(ctx context.Context, in *CreateEkmConnectionRequest, opts ...grpc.CallOption) (*EkmConnection, error) { + out := new(EkmConnection) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.EkmService/CreateEkmConnection", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ekmServiceClient) UpdateEkmConnection(ctx context.Context, in *UpdateEkmConnectionRequest, opts ...grpc.CallOption) (*EkmConnection, error) { + out := new(EkmConnection) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.EkmService/UpdateEkmConnection", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// EkmServiceServer is the server API for EkmService service. +type EkmServiceServer interface { + // Lists [EkmConnections][google.cloud.kms.v1.EkmConnection]. + ListEkmConnections(context.Context, *ListEkmConnectionsRequest) (*ListEkmConnectionsResponse, error) + // Returns metadata for a given + // [EkmConnection][google.cloud.kms.v1.EkmConnection]. + GetEkmConnection(context.Context, *GetEkmConnectionRequest) (*EkmConnection, error) + // Creates a new [EkmConnection][google.cloud.kms.v1.EkmConnection] in a given + // Project and Location. + CreateEkmConnection(context.Context, *CreateEkmConnectionRequest) (*EkmConnection, error) + // Updates an [EkmConnection][google.cloud.kms.v1.EkmConnection]'s metadata. + UpdateEkmConnection(context.Context, *UpdateEkmConnectionRequest) (*EkmConnection, error) +} + +// UnimplementedEkmServiceServer can be embedded to have forward compatible implementations. 
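+
+// Illustrative sketch, not part of the generated API surface: one way a
+// caller might exercise the generated EkmServiceClient. The dial target,
+// credentials (creds), ctx, and the parent resource name are assumptions
+// made for the example.
+//
+//	conn, err := grpc.Dial("cloudkms.googleapis.com:443", grpc.WithTransportCredentials(creds))
+//	if err != nil {
+//		return err
+//	}
+//	defer conn.Close()
+//	client := NewEkmServiceClient(conn)
+//	resp, err := client.ListEkmConnections(ctx, &ListEkmConnectionsRequest{
+//		Parent: "projects/my-project/locations/us-central1", // hypothetical parent
+//	})
+//	// On success, resp.EkmConnections lists the EkmConnection resources
+//	// under that location.
+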
+type UnimplementedEkmServiceServer struct { +} + +func (*UnimplementedEkmServiceServer) ListEkmConnections(context.Context, *ListEkmConnectionsRequest) (*ListEkmConnectionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListEkmConnections not implemented") +} +func (*UnimplementedEkmServiceServer) GetEkmConnection(context.Context, *GetEkmConnectionRequest) (*EkmConnection, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetEkmConnection not implemented") +} +func (*UnimplementedEkmServiceServer) CreateEkmConnection(context.Context, *CreateEkmConnectionRequest) (*EkmConnection, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateEkmConnection not implemented") +} +func (*UnimplementedEkmServiceServer) UpdateEkmConnection(context.Context, *UpdateEkmConnectionRequest) (*EkmConnection, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateEkmConnection not implemented") +} + +func RegisterEkmServiceServer(s *grpc.Server, srv EkmServiceServer) { + s.RegisterService(&_EkmService_serviceDesc, srv) +} + +func _EkmService_ListEkmConnections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListEkmConnectionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EkmServiceServer).ListEkmConnections(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.EkmService/ListEkmConnections", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EkmServiceServer).ListEkmConnections(ctx, req.(*ListEkmConnectionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _EkmService_GetEkmConnection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetEkmConnectionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EkmServiceServer).GetEkmConnection(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.EkmService/GetEkmConnection", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EkmServiceServer).GetEkmConnection(ctx, req.(*GetEkmConnectionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _EkmService_CreateEkmConnection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateEkmConnectionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EkmServiceServer).CreateEkmConnection(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.EkmService/CreateEkmConnection", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EkmServiceServer).CreateEkmConnection(ctx, req.(*CreateEkmConnectionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _EkmService_UpdateEkmConnection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateEkmConnectionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(EkmServiceServer).UpdateEkmConnection(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.EkmService/UpdateEkmConnection", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EkmServiceServer).UpdateEkmConnection(ctx, req.(*UpdateEkmConnectionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _EkmService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.kms.v1.EkmService", + HandlerType: (*EkmServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListEkmConnections", + Handler: _EkmService_ListEkmConnections_Handler, + }, + { + MethodName: "GetEkmConnection", + Handler: _EkmService_GetEkmConnection_Handler, + }, + { + MethodName: "CreateEkmConnection", + Handler: _EkmService_CreateEkmConnection_Handler, + }, + { + MethodName: "UpdateEkmConnection", + Handler: _EkmService_UpdateEkmConnection_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/kms/v1/ekm_service.proto", +} diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go new file mode 100644 index 00000000000..83b3568c947 --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go @@ -0,0 +1,2545 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.9 +// source: google/cloud/kms/v1/resources.proto + +package kmspb + +import ( + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] specifies how +// cryptographic operations are performed. For more information, see [Protection +// levels] (https://cloud.google.com/kms/docs/algorithms#protection_levels). +type ProtectionLevel int32 + +const ( + // Not specified. + ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED ProtectionLevel = 0 + // Crypto operations are performed in software. + ProtectionLevel_SOFTWARE ProtectionLevel = 1 + // Crypto operations are performed in a Hardware Security Module. + ProtectionLevel_HSM ProtectionLevel = 2 + // Crypto operations are performed by an external key manager. + ProtectionLevel_EXTERNAL ProtectionLevel = 3 + // Crypto operations are performed in an EKM-over-VPC backend. 
+ ProtectionLevel_EXTERNAL_VPC ProtectionLevel = 4 +) + +// Enum value maps for ProtectionLevel. +var ( + ProtectionLevel_name = map[int32]string{ + 0: "PROTECTION_LEVEL_UNSPECIFIED", + 1: "SOFTWARE", + 2: "HSM", + 3: "EXTERNAL", + 4: "EXTERNAL_VPC", + } + ProtectionLevel_value = map[string]int32{ + "PROTECTION_LEVEL_UNSPECIFIED": 0, + "SOFTWARE": 1, + "HSM": 2, + "EXTERNAL": 3, + "EXTERNAL_VPC": 4, + } +) + +func (x ProtectionLevel) Enum() *ProtectionLevel { + p := new(ProtectionLevel) + *p = x + return p +} + +func (x ProtectionLevel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ProtectionLevel) Descriptor() protoreflect.EnumDescriptor { + return file_google_cloud_kms_v1_resources_proto_enumTypes[0].Descriptor() +} + +func (ProtectionLevel) Type() protoreflect.EnumType { + return &file_google_cloud_kms_v1_resources_proto_enumTypes[0] +} + +func (x ProtectionLevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ProtectionLevel.Descriptor instead. +func (ProtectionLevel) EnumDescriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{0} +} + +// [CryptoKeyPurpose][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose] +// describes the cryptographic capabilities of a +// [CryptoKey][google.cloud.kms.v1.CryptoKey]. A given key can only be used +// for the operations allowed by its purpose. For more information, see [Key +// purposes](https://cloud.google.com/kms/docs/algorithms#key_purposes). +type CryptoKey_CryptoKeyPurpose int32 + +const ( + // Not specified. + CryptoKey_CRYPTO_KEY_PURPOSE_UNSPECIFIED CryptoKey_CryptoKeyPurpose = 0 + // [CryptoKeys][google.cloud.kms.v1.CryptoKey] with this purpose may be used + // with [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt] and + // [Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt]. + CryptoKey_ENCRYPT_DECRYPT CryptoKey_CryptoKeyPurpose = 1 + // [CryptoKeys][google.cloud.kms.v1.CryptoKey] with this purpose may be used + // with + // [AsymmetricSign][google.cloud.kms.v1.KeyManagementService.AsymmetricSign] + // and + // [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]. + CryptoKey_ASYMMETRIC_SIGN CryptoKey_CryptoKeyPurpose = 5 + // [CryptoKeys][google.cloud.kms.v1.CryptoKey] with this purpose may be used + // with + // [AsymmetricDecrypt][google.cloud.kms.v1.KeyManagementService.AsymmetricDecrypt] + // and + // [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]. + CryptoKey_ASYMMETRIC_DECRYPT CryptoKey_CryptoKeyPurpose = 6 + // [CryptoKeys][google.cloud.kms.v1.CryptoKey] with this purpose may be used + // with [MacSign][google.cloud.kms.v1.KeyManagementService.MacSign]. + CryptoKey_MAC CryptoKey_CryptoKeyPurpose = 9 +) + +// Enum value maps for CryptoKey_CryptoKeyPurpose. 
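+
+// Illustrative sketch, not part of the generated API surface: how the
+// purpose and protection level are typically combined when creating a
+// signing key. The parent key ring and key id are assumptions made for
+// the example; CreateCryptoKeyRequest is defined in service.pb.go.
+//
+//	req := &CreateCryptoKeyRequest{
+//		Parent:      "projects/p/locations/global/keyRings/r", // hypothetical
+//		CryptoKeyId: "signing-key",                            // hypothetical
+//		CryptoKey: &CryptoKey{
+//			// Restrict the key to AsymmetricSign/GetPublicKey.
+//			Purpose: CryptoKey_ASYMMETRIC_SIGN,
+//			VersionTemplate: &CryptoKeyVersionTemplate{
+//				ProtectionLevel: ProtectionLevel_HSM, // key material held in an HSM
+//				Algorithm:       CryptoKeyVersion_EC_SIGN_P256_SHA256,
+//			},
+//		},
+//	}
+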
+var ( + CryptoKey_CryptoKeyPurpose_name = map[int32]string{ + 0: "CRYPTO_KEY_PURPOSE_UNSPECIFIED", + 1: "ENCRYPT_DECRYPT", + 5: "ASYMMETRIC_SIGN", + 6: "ASYMMETRIC_DECRYPT", + 9: "MAC", + } + CryptoKey_CryptoKeyPurpose_value = map[string]int32{ + "CRYPTO_KEY_PURPOSE_UNSPECIFIED": 0, + "ENCRYPT_DECRYPT": 1, + "ASYMMETRIC_SIGN": 5, + "ASYMMETRIC_DECRYPT": 6, + "MAC": 9, + } +) + +func (x CryptoKey_CryptoKeyPurpose) Enum() *CryptoKey_CryptoKeyPurpose { + p := new(CryptoKey_CryptoKeyPurpose) + *p = x + return p +} + +func (x CryptoKey_CryptoKeyPurpose) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CryptoKey_CryptoKeyPurpose) Descriptor() protoreflect.EnumDescriptor { + return file_google_cloud_kms_v1_resources_proto_enumTypes[1].Descriptor() +} + +func (CryptoKey_CryptoKeyPurpose) Type() protoreflect.EnumType { + return &file_google_cloud_kms_v1_resources_proto_enumTypes[1] +} + +func (x CryptoKey_CryptoKeyPurpose) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CryptoKey_CryptoKeyPurpose.Descriptor instead. +func (CryptoKey_CryptoKeyPurpose) EnumDescriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{1, 0} +} + +// Attestation formats provided by the HSM. +type KeyOperationAttestation_AttestationFormat int32 + +const ( + // Not specified. + KeyOperationAttestation_ATTESTATION_FORMAT_UNSPECIFIED KeyOperationAttestation_AttestationFormat = 0 + // Cavium HSM attestation compressed with gzip. Note that this format is + // defined by Cavium and subject to change at any time. + // + // See + // https://www.marvell.com/products/security-solutions/nitrox-hs-adapters/software-key-attestation.html. + KeyOperationAttestation_CAVIUM_V1_COMPRESSED KeyOperationAttestation_AttestationFormat = 3 + // Cavium HSM attestation V2 compressed with gzip. This is a new format + // introduced in Cavium's version 3.2-08. + KeyOperationAttestation_CAVIUM_V2_COMPRESSED KeyOperationAttestation_AttestationFormat = 4 +) + +// Enum value maps for KeyOperationAttestation_AttestationFormat. +var ( + KeyOperationAttestation_AttestationFormat_name = map[int32]string{ + 0: "ATTESTATION_FORMAT_UNSPECIFIED", + 3: "CAVIUM_V1_COMPRESSED", + 4: "CAVIUM_V2_COMPRESSED", + } + KeyOperationAttestation_AttestationFormat_value = map[string]int32{ + "ATTESTATION_FORMAT_UNSPECIFIED": 0, + "CAVIUM_V1_COMPRESSED": 3, + "CAVIUM_V2_COMPRESSED": 4, + } +) + +func (x KeyOperationAttestation_AttestationFormat) Enum() *KeyOperationAttestation_AttestationFormat { + p := new(KeyOperationAttestation_AttestationFormat) + *p = x + return p +} + +func (x KeyOperationAttestation_AttestationFormat) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (KeyOperationAttestation_AttestationFormat) Descriptor() protoreflect.EnumDescriptor { + return file_google_cloud_kms_v1_resources_proto_enumTypes[2].Descriptor() +} + +func (KeyOperationAttestation_AttestationFormat) Type() protoreflect.EnumType { + return &file_google_cloud_kms_v1_resources_proto_enumTypes[2] +} + +func (x KeyOperationAttestation_AttestationFormat) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use KeyOperationAttestation_AttestationFormat.Descriptor instead. 
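+
+// Illustrative sketch, not part of the generated API surface: both
+// CAVIUM_*_COMPRESSED formats are gzip-compressed, so a verifier would
+// typically decompress the attestation content before parsing it. Here
+// att is an assumed *KeyOperationAttestation.
+//
+//	zr, err := gzip.NewReader(bytes.NewReader(att.GetContent()))
+//	if err != nil {
+//		return err
+//	}
+//	raw, err := io.ReadAll(zr) // raw Cavium attestation bytes
+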
+func (KeyOperationAttestation_AttestationFormat) EnumDescriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{3, 0} +} + +// The algorithm of the +// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion], indicating what +// parameters must be used for each cryptographic operation. +// +// The +// [GOOGLE_SYMMETRIC_ENCRYPTION][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm.GOOGLE_SYMMETRIC_ENCRYPTION] +// algorithm is usable with +// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] +// [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. +// +// Algorithms beginning with "RSA_SIGN_" are usable with +// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] +// [ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN]. +// +// The fields in the name after "RSA_SIGN_" correspond to the following +// parameters: padding algorithm, modulus bit length, and digest algorithm. +// +// For PSS, the salt length used is equal to the length of digest +// algorithm. For example, +// [RSA_SIGN_PSS_2048_SHA256][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm.RSA_SIGN_PSS_2048_SHA256] +// will use PSS with a salt length of 256 bits or 32 bytes. +// +// Algorithms beginning with "RSA_DECRYPT_" are usable with +// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] +// [ASYMMETRIC_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_DECRYPT]. +// +// The fields in the name after "RSA_DECRYPT_" correspond to the following +// parameters: padding algorithm, modulus bit length, and digest algorithm. +// +// Algorithms beginning with "EC_SIGN_" are usable with +// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] +// [ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN]. +// +// The fields in the name after "EC_SIGN_" correspond to the following +// parameters: elliptic curve, digest algorithm. +// +// Algorithms beginning with "HMAC_" are usable with +// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] +// [MAC][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.MAC]. +// +// The suffix following "HMAC_" corresponds to the hash algorithm being used +// (eg. SHA256). +// +// For more information, see [Key purposes and algorithms] +// (https://cloud.google.com/kms/docs/algorithms). +type CryptoKeyVersion_CryptoKeyVersionAlgorithm int32 + +const ( + // Not specified. + CryptoKeyVersion_CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED CryptoKeyVersion_CryptoKeyVersionAlgorithm = 0 + // Creates symmetric encryption keys. + CryptoKeyVersion_GOOGLE_SYMMETRIC_ENCRYPTION CryptoKeyVersion_CryptoKeyVersionAlgorithm = 1 + // RSASSA-PSS 2048 bit key with a SHA256 digest. + CryptoKeyVersion_RSA_SIGN_PSS_2048_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 2 + // RSASSA-PSS 3072 bit key with a SHA256 digest. + CryptoKeyVersion_RSA_SIGN_PSS_3072_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 3 + // RSASSA-PSS 4096 bit key with a SHA256 digest. + CryptoKeyVersion_RSA_SIGN_PSS_4096_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 4 + // RSASSA-PSS 4096 bit key with a SHA512 digest. + CryptoKeyVersion_RSA_SIGN_PSS_4096_SHA512 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 15 + // RSASSA-PKCS1-v1_5 with a 2048 bit key and a SHA256 digest. + CryptoKeyVersion_RSA_SIGN_PKCS1_2048_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 5 + // RSASSA-PKCS1-v1_5 with a 3072 bit key and a SHA256 digest. 
+ CryptoKeyVersion_RSA_SIGN_PKCS1_3072_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 6 + // RSASSA-PKCS1-v1_5 with a 4096 bit key and a SHA256 digest. + CryptoKeyVersion_RSA_SIGN_PKCS1_4096_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 7 + // RSASSA-PKCS1-v1_5 with a 4096 bit key and a SHA512 digest. + CryptoKeyVersion_RSA_SIGN_PKCS1_4096_SHA512 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 16 + // RSASSA-PKCS1-v1_5 signing without encoding, with a 2048 bit key. + CryptoKeyVersion_RSA_SIGN_RAW_PKCS1_2048 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 28 + // RSASSA-PKCS1-v1_5 signing without encoding, with a 3072 bit key. + CryptoKeyVersion_RSA_SIGN_RAW_PKCS1_3072 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 29 + // RSASSA-PKCS1-v1_5 signing without encoding, with a 4096 bit key. + CryptoKeyVersion_RSA_SIGN_RAW_PKCS1_4096 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 30 + // RSAES-OAEP 2048 bit key with a SHA256 digest. + CryptoKeyVersion_RSA_DECRYPT_OAEP_2048_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 8 + // RSAES-OAEP 3072 bit key with a SHA256 digest. + CryptoKeyVersion_RSA_DECRYPT_OAEP_3072_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 9 + // RSAES-OAEP 4096 bit key with a SHA256 digest. + CryptoKeyVersion_RSA_DECRYPT_OAEP_4096_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 10 + // RSAES-OAEP 4096 bit key with a SHA512 digest. + CryptoKeyVersion_RSA_DECRYPT_OAEP_4096_SHA512 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 17 + // RSAES-OAEP 2048 bit key with a SHA1 digest. + CryptoKeyVersion_RSA_DECRYPT_OAEP_2048_SHA1 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 37 + // RSAES-OAEP 3072 bit key with a SHA1 digest. + CryptoKeyVersion_RSA_DECRYPT_OAEP_3072_SHA1 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 38 + // RSAES-OAEP 4096 bit key with a SHA1 digest. + CryptoKeyVersion_RSA_DECRYPT_OAEP_4096_SHA1 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 39 + // ECDSA on the NIST P-256 curve with a SHA256 digest. + CryptoKeyVersion_EC_SIGN_P256_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 12 + // ECDSA on the NIST P-384 curve with a SHA384 digest. + CryptoKeyVersion_EC_SIGN_P384_SHA384 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 13 + // ECDSA on the non-NIST secp256k1 curve. This curve is only supported for + // HSM protection level. + CryptoKeyVersion_EC_SIGN_SECP256K1_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 31 + // HMAC-SHA256 signing with a 256 bit key. + CryptoKeyVersion_HMAC_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 32 + // HMAC-SHA1 signing with a 160 bit key. + CryptoKeyVersion_HMAC_SHA1 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 33 + // HMAC-SHA384 signing with a 384 bit key. + CryptoKeyVersion_HMAC_SHA384 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 34 + // HMAC-SHA512 signing with a 512 bit key. + CryptoKeyVersion_HMAC_SHA512 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 35 + // HMAC-SHA224 signing with a 224 bit key. + CryptoKeyVersion_HMAC_SHA224 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 36 + // Algorithm representing symmetric encryption by an external key manager. + CryptoKeyVersion_EXTERNAL_SYMMETRIC_ENCRYPTION CryptoKeyVersion_CryptoKeyVersionAlgorithm = 18 +) + +// Enum value maps for CryptoKeyVersion_CryptoKeyVersionAlgorithm. 
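+
+// Illustrative sketch, not part of the generated API surface: the
+// algorithm tells a caller how to verify a KMS-produced signature
+// locally. For EC_SIGN_P256_SHA256, the message is hashed with SHA-256
+// and the ASN.1 ECDSA signature is checked against the public key from
+// GetPublicKey. pub (*ecdsa.PublicKey), msg, and sig are assumptions
+// made for the example.
+//
+//	digest := sha256.Sum256(msg)
+//	if !ecdsa.VerifyASN1(pub, digest[:], sig) {
+//		return errors.New("signature verification failed")
+//	}
+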
+var ( + CryptoKeyVersion_CryptoKeyVersionAlgorithm_name = map[int32]string{ + 0: "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED", + 1: "GOOGLE_SYMMETRIC_ENCRYPTION", + 2: "RSA_SIGN_PSS_2048_SHA256", + 3: "RSA_SIGN_PSS_3072_SHA256", + 4: "RSA_SIGN_PSS_4096_SHA256", + 15: "RSA_SIGN_PSS_4096_SHA512", + 5: "RSA_SIGN_PKCS1_2048_SHA256", + 6: "RSA_SIGN_PKCS1_3072_SHA256", + 7: "RSA_SIGN_PKCS1_4096_SHA256", + 16: "RSA_SIGN_PKCS1_4096_SHA512", + 28: "RSA_SIGN_RAW_PKCS1_2048", + 29: "RSA_SIGN_RAW_PKCS1_3072", + 30: "RSA_SIGN_RAW_PKCS1_4096", + 8: "RSA_DECRYPT_OAEP_2048_SHA256", + 9: "RSA_DECRYPT_OAEP_3072_SHA256", + 10: "RSA_DECRYPT_OAEP_4096_SHA256", + 17: "RSA_DECRYPT_OAEP_4096_SHA512", + 37: "RSA_DECRYPT_OAEP_2048_SHA1", + 38: "RSA_DECRYPT_OAEP_3072_SHA1", + 39: "RSA_DECRYPT_OAEP_4096_SHA1", + 12: "EC_SIGN_P256_SHA256", + 13: "EC_SIGN_P384_SHA384", + 31: "EC_SIGN_SECP256K1_SHA256", + 32: "HMAC_SHA256", + 33: "HMAC_SHA1", + 34: "HMAC_SHA384", + 35: "HMAC_SHA512", + 36: "HMAC_SHA224", + 18: "EXTERNAL_SYMMETRIC_ENCRYPTION", + } + CryptoKeyVersion_CryptoKeyVersionAlgorithm_value = map[string]int32{ + "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED": 0, + "GOOGLE_SYMMETRIC_ENCRYPTION": 1, + "RSA_SIGN_PSS_2048_SHA256": 2, + "RSA_SIGN_PSS_3072_SHA256": 3, + "RSA_SIGN_PSS_4096_SHA256": 4, + "RSA_SIGN_PSS_4096_SHA512": 15, + "RSA_SIGN_PKCS1_2048_SHA256": 5, + "RSA_SIGN_PKCS1_3072_SHA256": 6, + "RSA_SIGN_PKCS1_4096_SHA256": 7, + "RSA_SIGN_PKCS1_4096_SHA512": 16, + "RSA_SIGN_RAW_PKCS1_2048": 28, + "RSA_SIGN_RAW_PKCS1_3072": 29, + "RSA_SIGN_RAW_PKCS1_4096": 30, + "RSA_DECRYPT_OAEP_2048_SHA256": 8, + "RSA_DECRYPT_OAEP_3072_SHA256": 9, + "RSA_DECRYPT_OAEP_4096_SHA256": 10, + "RSA_DECRYPT_OAEP_4096_SHA512": 17, + "RSA_DECRYPT_OAEP_2048_SHA1": 37, + "RSA_DECRYPT_OAEP_3072_SHA1": 38, + "RSA_DECRYPT_OAEP_4096_SHA1": 39, + "EC_SIGN_P256_SHA256": 12, + "EC_SIGN_P384_SHA384": 13, + "EC_SIGN_SECP256K1_SHA256": 31, + "HMAC_SHA256": 32, + "HMAC_SHA1": 33, + "HMAC_SHA384": 34, + "HMAC_SHA512": 35, + "HMAC_SHA224": 36, + "EXTERNAL_SYMMETRIC_ENCRYPTION": 18, + } +) + +func (x CryptoKeyVersion_CryptoKeyVersionAlgorithm) Enum() *CryptoKeyVersion_CryptoKeyVersionAlgorithm { + p := new(CryptoKeyVersion_CryptoKeyVersionAlgorithm) + *p = x + return p +} + +func (x CryptoKeyVersion_CryptoKeyVersionAlgorithm) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CryptoKeyVersion_CryptoKeyVersionAlgorithm) Descriptor() protoreflect.EnumDescriptor { + return file_google_cloud_kms_v1_resources_proto_enumTypes[3].Descriptor() +} + +func (CryptoKeyVersion_CryptoKeyVersionAlgorithm) Type() protoreflect.EnumType { + return &file_google_cloud_kms_v1_resources_proto_enumTypes[3] +} + +func (x CryptoKeyVersion_CryptoKeyVersionAlgorithm) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CryptoKeyVersion_CryptoKeyVersionAlgorithm.Descriptor instead. +func (CryptoKeyVersion_CryptoKeyVersionAlgorithm) EnumDescriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{4, 0} +} + +// The state of a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion], +// indicating if it can be used. +type CryptoKeyVersion_CryptoKeyVersionState int32 + +const ( + // Not specified. + CryptoKeyVersion_CRYPTO_KEY_VERSION_STATE_UNSPECIFIED CryptoKeyVersion_CryptoKeyVersionState = 0 + // This version is still being generated. It may not be used, enabled, + // disabled, or destroyed yet. 
Cloud KMS will automatically mark this + // version + // [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED] + // as soon as the version is ready. + CryptoKeyVersion_PENDING_GENERATION CryptoKeyVersion_CryptoKeyVersionState = 5 + // This version may be used for cryptographic operations. + CryptoKeyVersion_ENABLED CryptoKeyVersion_CryptoKeyVersionState = 1 + // This version may not be used, but the key material is still available, + // and the version can be placed back into the + // [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED] + // state. + CryptoKeyVersion_DISABLED CryptoKeyVersion_CryptoKeyVersionState = 2 + // This version is destroyed, and the key material is no longer stored. + // This version may only become + // [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED] + // again if this version is + // [reimport_eligible][google.cloud.kms.v1.CryptoKeyVersion.reimport_eligible] + // and the original key material is reimported with a call to + // [KeyManagementService.ImportCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.ImportCryptoKeyVersion]. + CryptoKeyVersion_DESTROYED CryptoKeyVersion_CryptoKeyVersionState = 3 + // This version is scheduled for destruction, and will be destroyed soon. + // Call + // [RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion] + // to put it back into the + // [DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED] + // state. + CryptoKeyVersion_DESTROY_SCHEDULED CryptoKeyVersion_CryptoKeyVersionState = 4 + // This version is still being imported. It may not be used, enabled, + // disabled, or destroyed yet. Cloud KMS will automatically mark this + // version + // [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED] + // as soon as the version is ready. + CryptoKeyVersion_PENDING_IMPORT CryptoKeyVersion_CryptoKeyVersionState = 6 + // This version was not imported successfully. It may not be used, enabled, + // disabled, or destroyed. The submitted key material has been discarded. + // Additional details can be found in + // [CryptoKeyVersion.import_failure_reason][google.cloud.kms.v1.CryptoKeyVersion.import_failure_reason]. + CryptoKeyVersion_IMPORT_FAILED CryptoKeyVersion_CryptoKeyVersionState = 7 +) + +// Enum value maps for CryptoKeyVersion_CryptoKeyVersionState. 
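+
+// Illustrative sketch, not part of the generated API surface: only
+// ENABLED versions may serve cryptographic operations, so callers
+// commonly gate on the state first. v is an assumed *CryptoKeyVersion.
+//
+//	if v.GetState() != CryptoKeyVersion_ENABLED {
+//		return fmt.Errorf("key version %s is %s, not ENABLED", v.GetName(), v.GetState())
+//	}
+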
+var ( + CryptoKeyVersion_CryptoKeyVersionState_name = map[int32]string{ + 0: "CRYPTO_KEY_VERSION_STATE_UNSPECIFIED", + 5: "PENDING_GENERATION", + 1: "ENABLED", + 2: "DISABLED", + 3: "DESTROYED", + 4: "DESTROY_SCHEDULED", + 6: "PENDING_IMPORT", + 7: "IMPORT_FAILED", + } + CryptoKeyVersion_CryptoKeyVersionState_value = map[string]int32{ + "CRYPTO_KEY_VERSION_STATE_UNSPECIFIED": 0, + "PENDING_GENERATION": 5, + "ENABLED": 1, + "DISABLED": 2, + "DESTROYED": 3, + "DESTROY_SCHEDULED": 4, + "PENDING_IMPORT": 6, + "IMPORT_FAILED": 7, + } +) + +func (x CryptoKeyVersion_CryptoKeyVersionState) Enum() *CryptoKeyVersion_CryptoKeyVersionState { + p := new(CryptoKeyVersion_CryptoKeyVersionState) + *p = x + return p +} + +func (x CryptoKeyVersion_CryptoKeyVersionState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CryptoKeyVersion_CryptoKeyVersionState) Descriptor() protoreflect.EnumDescriptor { + return file_google_cloud_kms_v1_resources_proto_enumTypes[4].Descriptor() +} + +func (CryptoKeyVersion_CryptoKeyVersionState) Type() protoreflect.EnumType { + return &file_google_cloud_kms_v1_resources_proto_enumTypes[4] +} + +func (x CryptoKeyVersion_CryptoKeyVersionState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CryptoKeyVersion_CryptoKeyVersionState.Descriptor instead. +func (CryptoKeyVersion_CryptoKeyVersionState) EnumDescriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{4, 1} +} + +// A view for [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]s. +// Controls the level of detail returned for +// [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] in +// [KeyManagementService.ListCryptoKeyVersions][google.cloud.kms.v1.KeyManagementService.ListCryptoKeyVersions] +// and +// [KeyManagementService.ListCryptoKeys][google.cloud.kms.v1.KeyManagementService.ListCryptoKeys]. +type CryptoKeyVersion_CryptoKeyVersionView int32 + +const ( + // Default view for each + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. Does not + // include the + // [attestation][google.cloud.kms.v1.CryptoKeyVersion.attestation] field. + CryptoKeyVersion_CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED CryptoKeyVersion_CryptoKeyVersionView = 0 + // Provides all fields in each + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion], including the + // [attestation][google.cloud.kms.v1.CryptoKeyVersion.attestation]. + CryptoKeyVersion_FULL CryptoKeyVersion_CryptoKeyVersionView = 1 +) + +// Enum value maps for CryptoKeyVersion_CryptoKeyVersionView. 
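+
+// Illustrative sketch, not part of the generated API surface: the view
+// controls whether attestation data is populated; requesting FULL on a
+// list call returns the attestation field as well. client, ctx, and
+// keyName are assumptions made for the example;
+// ListCryptoKeyVersionsRequest is defined in service.pb.go.
+//
+//	resp, err := client.ListCryptoKeyVersions(ctx, &ListCryptoKeyVersionsRequest{
+//		Parent: keyName,               // hypothetical CryptoKey resource name
+//		View:   CryptoKeyVersion_FULL, // include attestation in each version
+//	})
+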
+var ( + CryptoKeyVersion_CryptoKeyVersionView_name = map[int32]string{ + 0: "CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED", + 1: "FULL", + } + CryptoKeyVersion_CryptoKeyVersionView_value = map[string]int32{ + "CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED": 0, + "FULL": 1, + } +) + +func (x CryptoKeyVersion_CryptoKeyVersionView) Enum() *CryptoKeyVersion_CryptoKeyVersionView { + p := new(CryptoKeyVersion_CryptoKeyVersionView) + *p = x + return p +} + +func (x CryptoKeyVersion_CryptoKeyVersionView) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CryptoKeyVersion_CryptoKeyVersionView) Descriptor() protoreflect.EnumDescriptor { + return file_google_cloud_kms_v1_resources_proto_enumTypes[5].Descriptor() +} + +func (CryptoKeyVersion_CryptoKeyVersionView) Type() protoreflect.EnumType { + return &file_google_cloud_kms_v1_resources_proto_enumTypes[5] +} + +func (x CryptoKeyVersion_CryptoKeyVersionView) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CryptoKeyVersion_CryptoKeyVersionView.Descriptor instead. +func (CryptoKeyVersion_CryptoKeyVersionView) EnumDescriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{4, 2} +} + +// [ImportMethod][google.cloud.kms.v1.ImportJob.ImportMethod] describes the +// key wrapping method chosen for this +// [ImportJob][google.cloud.kms.v1.ImportJob]. +type ImportJob_ImportMethod int32 + +const ( + // Not specified. + ImportJob_IMPORT_METHOD_UNSPECIFIED ImportJob_ImportMethod = 0 + // This ImportMethod represents the CKM_RSA_AES_KEY_WRAP key wrapping + // scheme defined in the PKCS #11 standard. In summary, this involves + // wrapping the raw key with an ephemeral AES key, and wrapping the + // ephemeral AES key with a 3072 bit RSA key. For more details, see + // [RSA AES key wrap + // mechanism](http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cos01/pkcs11-curr-v2.40-cos01.html#_Toc408226908). + ImportJob_RSA_OAEP_3072_SHA1_AES_256 ImportJob_ImportMethod = 1 + // This ImportMethod represents the CKM_RSA_AES_KEY_WRAP key wrapping + // scheme defined in the PKCS #11 standard. In summary, this involves + // wrapping the raw key with an ephemeral AES key, and wrapping the + // ephemeral AES key with a 4096 bit RSA key. For more details, see + // [RSA AES key wrap + // mechanism](http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cos01/pkcs11-curr-v2.40-cos01.html#_Toc408226908). + ImportJob_RSA_OAEP_4096_SHA1_AES_256 ImportJob_ImportMethod = 2 + // This ImportMethod represents the CKM_RSA_AES_KEY_WRAP key wrapping + // scheme defined in the PKCS #11 standard. In summary, this involves + // wrapping the raw key with an ephemeral AES key, and wrapping the + // ephemeral AES key with a 3072 bit RSA key. For more details, see + // [RSA AES key wrap + // mechanism](http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cos01/pkcs11-curr-v2.40-cos01.html#_Toc408226908). + ImportJob_RSA_OAEP_3072_SHA256_AES_256 ImportJob_ImportMethod = 3 + // This ImportMethod represents the CKM_RSA_AES_KEY_WRAP key wrapping + // scheme defined in the PKCS #11 standard. In summary, this involves + // wrapping the raw key with an ephemeral AES key, and wrapping the + // ephemeral AES key with a 4096 bit RSA key. For more details, see + // [RSA AES key wrap + // mechanism](http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cos01/pkcs11-curr-v2.40-cos01.html#_Toc408226908). 
+ ImportJob_RSA_OAEP_4096_SHA256_AES_256 ImportJob_ImportMethod = 4 + // This ImportMethod represents RSAES-OAEP with a 3072 bit RSA key. The + // key material to be imported is wrapped directly with the RSA key. Due + // to technical limitations of RSA wrapping, this method cannot be used to + // wrap RSA keys for import. + ImportJob_RSA_OAEP_3072_SHA256 ImportJob_ImportMethod = 5 + // This ImportMethod represents RSAES-OAEP with a 4096 bit RSA key. The + // key material to be imported is wrapped directly with the RSA key. Due + // to technical limitations of RSA wrapping, this method cannot be used to + // wrap RSA keys for import. + ImportJob_RSA_OAEP_4096_SHA256 ImportJob_ImportMethod = 6 +) + +// Enum value maps for ImportJob_ImportMethod. +var ( + ImportJob_ImportMethod_name = map[int32]string{ + 0: "IMPORT_METHOD_UNSPECIFIED", + 1: "RSA_OAEP_3072_SHA1_AES_256", + 2: "RSA_OAEP_4096_SHA1_AES_256", + 3: "RSA_OAEP_3072_SHA256_AES_256", + 4: "RSA_OAEP_4096_SHA256_AES_256", + 5: "RSA_OAEP_3072_SHA256", + 6: "RSA_OAEP_4096_SHA256", + } + ImportJob_ImportMethod_value = map[string]int32{ + "IMPORT_METHOD_UNSPECIFIED": 0, + "RSA_OAEP_3072_SHA1_AES_256": 1, + "RSA_OAEP_4096_SHA1_AES_256": 2, + "RSA_OAEP_3072_SHA256_AES_256": 3, + "RSA_OAEP_4096_SHA256_AES_256": 4, + "RSA_OAEP_3072_SHA256": 5, + "RSA_OAEP_4096_SHA256": 6, + } +) + +func (x ImportJob_ImportMethod) Enum() *ImportJob_ImportMethod { + p := new(ImportJob_ImportMethod) + *p = x + return p +} + +func (x ImportJob_ImportMethod) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ImportJob_ImportMethod) Descriptor() protoreflect.EnumDescriptor { + return file_google_cloud_kms_v1_resources_proto_enumTypes[6].Descriptor() +} + +func (ImportJob_ImportMethod) Type() protoreflect.EnumType { + return &file_google_cloud_kms_v1_resources_proto_enumTypes[6] +} + +func (x ImportJob_ImportMethod) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ImportJob_ImportMethod.Descriptor instead. +func (ImportJob_ImportMethod) EnumDescriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{6, 0} +} + +// The state of the [ImportJob][google.cloud.kms.v1.ImportJob], indicating if +// it can be used. +type ImportJob_ImportJobState int32 + +const ( + // Not specified. + ImportJob_IMPORT_JOB_STATE_UNSPECIFIED ImportJob_ImportJobState = 0 + // The wrapping key for this job is still being generated. It may not be + // used. Cloud KMS will automatically mark this job as + // [ACTIVE][google.cloud.kms.v1.ImportJob.ImportJobState.ACTIVE] as soon as + // the wrapping key is generated. + ImportJob_PENDING_GENERATION ImportJob_ImportJobState = 1 + // This job may be used in + // [CreateCryptoKey][google.cloud.kms.v1.KeyManagementService.CreateCryptoKey] + // and + // [CreateCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion] + // requests. + ImportJob_ACTIVE ImportJob_ImportJobState = 2 + // This job can no longer be used and may not leave this state once entered. + ImportJob_EXPIRED ImportJob_ImportJobState = 3 +) + +// Enum value maps for ImportJob_ImportJobState. 
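+
+// Illustrative sketch, not part of the generated API surface: the
+// wrapping key is generated asynchronously, so callers typically poll
+// until the job leaves PENDING_GENERATION before wrapping key material.
+// client (a KeyManagementServiceClient), ctx, and jobName are
+// assumptions made for the example.
+//
+//	for {
+//		job, err := client.GetImportJob(ctx, &GetImportJobRequest{Name: jobName})
+//		if err != nil || job.GetState() != ImportJob_PENDING_GENERATION {
+//			break // ACTIVE, EXPIRED, or lookup failure
+//		}
+//		time.Sleep(time.Second)
+//	}
+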
+var ( + ImportJob_ImportJobState_name = map[int32]string{ + 0: "IMPORT_JOB_STATE_UNSPECIFIED", + 1: "PENDING_GENERATION", + 2: "ACTIVE", + 3: "EXPIRED", + } + ImportJob_ImportJobState_value = map[string]int32{ + "IMPORT_JOB_STATE_UNSPECIFIED": 0, + "PENDING_GENERATION": 1, + "ACTIVE": 2, + "EXPIRED": 3, + } +) + +func (x ImportJob_ImportJobState) Enum() *ImportJob_ImportJobState { + p := new(ImportJob_ImportJobState) + *p = x + return p +} + +func (x ImportJob_ImportJobState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ImportJob_ImportJobState) Descriptor() protoreflect.EnumDescriptor { + return file_google_cloud_kms_v1_resources_proto_enumTypes[7].Descriptor() +} + +func (ImportJob_ImportJobState) Type() protoreflect.EnumType { + return &file_google_cloud_kms_v1_resources_proto_enumTypes[7] +} + +func (x ImportJob_ImportJobState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ImportJob_ImportJobState.Descriptor instead. +func (ImportJob_ImportJobState) EnumDescriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{6, 1} +} + +// A [KeyRing][google.cloud.kms.v1.KeyRing] is a toplevel logical grouping of +// [CryptoKeys][google.cloud.kms.v1.CryptoKey]. +type KeyRing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The resource name for the + // [KeyRing][google.cloud.kms.v1.KeyRing] in the format + // `projects/*/locations/*/keyRings/*`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Output only. The time at which this [KeyRing][google.cloud.kms.v1.KeyRing] + // was created. + CreateTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` +} + +func (x *KeyRing) Reset() { + *x = KeyRing{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyRing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyRing) ProtoMessage() {} + +func (x *KeyRing) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyRing.ProtoReflect.Descriptor instead. +func (*KeyRing) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{0} +} + +func (x *KeyRing) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *KeyRing) GetCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.CreateTime + } + return nil +} + +// A [CryptoKey][google.cloud.kms.v1.CryptoKey] represents a logical key that +// can be used for cryptographic operations. +// +// A [CryptoKey][google.cloud.kms.v1.CryptoKey] is made up of zero or more +// [versions][google.cloud.kms.v1.CryptoKeyVersion], which represent the actual +// key material used in cryptographic operations. +type CryptoKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. 
The resource name for this + // [CryptoKey][google.cloud.kms.v1.CryptoKey] in the format + // `projects/*/locations/*/keyRings/*/cryptoKeys/*`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Output only. A copy of the "primary" + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] that will be used + // by [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt] when this + // [CryptoKey][google.cloud.kms.v1.CryptoKey] is given in + // [EncryptRequest.name][google.cloud.kms.v1.EncryptRequest.name]. + // + // The [CryptoKey][google.cloud.kms.v1.CryptoKey]'s primary version can be + // updated via + // [UpdateCryptoKeyPrimaryVersion][google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyPrimaryVersion]. + // + // Keys with [purpose][google.cloud.kms.v1.CryptoKey.purpose] + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT] + // may have a primary. For other keys, this field will be omitted. + Primary *CryptoKeyVersion `protobuf:"bytes,2,opt,name=primary,proto3" json:"primary,omitempty"` + // Immutable. The immutable purpose of this + // [CryptoKey][google.cloud.kms.v1.CryptoKey]. + Purpose CryptoKey_CryptoKeyPurpose `protobuf:"varint,3,opt,name=purpose,proto3,enum=google.cloud.kms.v1.CryptoKey_CryptoKeyPurpose" json:"purpose,omitempty"` + // Output only. The time at which this + // [CryptoKey][google.cloud.kms.v1.CryptoKey] was created. + CreateTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // At [next_rotation_time][google.cloud.kms.v1.CryptoKey.next_rotation_time], + // the Key Management Service will automatically: + // + // 1. Create a new version of this [CryptoKey][google.cloud.kms.v1.CryptoKey]. + // 2. Mark the new version as primary. + // + // Key rotations performed manually via + // [CreateCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion] + // and + // [UpdateCryptoKeyPrimaryVersion][google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyPrimaryVersion] + // do not affect + // [next_rotation_time][google.cloud.kms.v1.CryptoKey.next_rotation_time]. + // + // Keys with [purpose][google.cloud.kms.v1.CryptoKey.purpose] + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT] + // support automatic rotation. For other keys, this field must be omitted. + NextRotationTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=next_rotation_time,json=nextRotationTime,proto3" json:"next_rotation_time,omitempty"` + // Controls the rate of automatic rotation. + // + // Types that are assignable to RotationSchedule: + // + // *CryptoKey_RotationPeriod + RotationSchedule isCryptoKey_RotationSchedule `protobuf_oneof:"rotation_schedule"` + // A template describing settings for new + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] instances. The + // properties of new [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] + // instances created by either + // [CreateCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion] + // or auto-rotation are controlled by this template. + VersionTemplate *CryptoKeyVersionTemplate `protobuf:"bytes,11,opt,name=version_template,json=versionTemplate,proto3" json:"version_template,omitempty"` + // Labels with user-defined metadata. For more information, see + // [Labeling Keys](https://cloud.google.com/kms/docs/labeling-keys). 
+ Labels map[string]string `protobuf:"bytes,10,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Immutable. Whether this key may contain imported versions only. + ImportOnly bool `protobuf:"varint,13,opt,name=import_only,json=importOnly,proto3" json:"import_only,omitempty"` + // Immutable. The period of time that versions of this key spend in the + // [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED] + // state before transitioning to + // [DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED]. + // If not specified at creation time, the default duration is 24 hours. + DestroyScheduledDuration *durationpb.Duration `protobuf:"bytes,14,opt,name=destroy_scheduled_duration,json=destroyScheduledDuration,proto3" json:"destroy_scheduled_duration,omitempty"` + // Immutable. The resource name of the backend environment where the key + // material for all [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] + // associated with this [CryptoKey][google.cloud.kms.v1.CryptoKey] reside and + // where all related cryptographic operations are performed. Only applicable + // if [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] have a + // [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of + // [EXTERNAL_VPC][CryptoKeyVersion.ProtectionLevel.EXTERNAL_VPC], with the + // resource name in the format `projects/*/locations/*/ekmConnections/*`. + // Note, this list is non-exhaustive and may apply to additional + // [ProtectionLevels][google.cloud.kms.v1.ProtectionLevel] in the future. + CryptoKeyBackend string `protobuf:"bytes,15,opt,name=crypto_key_backend,json=cryptoKeyBackend,proto3" json:"crypto_key_backend,omitempty"` +} + +func (x *CryptoKey) Reset() { + *x = CryptoKey{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CryptoKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CryptoKey) ProtoMessage() {} + +func (x *CryptoKey) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CryptoKey.ProtoReflect.Descriptor instead. 
+func (*CryptoKey) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{1} +} + +func (x *CryptoKey) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CryptoKey) GetPrimary() *CryptoKeyVersion { + if x != nil { + return x.Primary + } + return nil +} + +func (x *CryptoKey) GetPurpose() CryptoKey_CryptoKeyPurpose { + if x != nil { + return x.Purpose + } + return CryptoKey_CRYPTO_KEY_PURPOSE_UNSPECIFIED +} + +func (x *CryptoKey) GetCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.CreateTime + } + return nil +} + +func (x *CryptoKey) GetNextRotationTime() *timestamppb.Timestamp { + if x != nil { + return x.NextRotationTime + } + return nil +} + +func (m *CryptoKey) GetRotationSchedule() isCryptoKey_RotationSchedule { + if m != nil { + return m.RotationSchedule + } + return nil +} + +func (x *CryptoKey) GetRotationPeriod() *durationpb.Duration { + if x, ok := x.GetRotationSchedule().(*CryptoKey_RotationPeriod); ok { + return x.RotationPeriod + } + return nil +} + +func (x *CryptoKey) GetVersionTemplate() *CryptoKeyVersionTemplate { + if x != nil { + return x.VersionTemplate + } + return nil +} + +func (x *CryptoKey) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *CryptoKey) GetImportOnly() bool { + if x != nil { + return x.ImportOnly + } + return false +} + +func (x *CryptoKey) GetDestroyScheduledDuration() *durationpb.Duration { + if x != nil { + return x.DestroyScheduledDuration + } + return nil +} + +func (x *CryptoKey) GetCryptoKeyBackend() string { + if x != nil { + return x.CryptoKeyBackend + } + return "" +} + +type isCryptoKey_RotationSchedule interface { + isCryptoKey_RotationSchedule() +} + +type CryptoKey_RotationPeriod struct { + // [next_rotation_time][google.cloud.kms.v1.CryptoKey.next_rotation_time] + // will be advanced by this period when the service automatically rotates a + // key. Must be at least 24 hours and at most 876,000 hours. + // + // If [rotation_period][google.cloud.kms.v1.CryptoKey.rotation_period] is + // set, + // [next_rotation_time][google.cloud.kms.v1.CryptoKey.next_rotation_time] + // must also be set. + // + // Keys with [purpose][google.cloud.kms.v1.CryptoKey.purpose] + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT] + // support automatic rotation. For other keys, this field must be omitted. + RotationPeriod *durationpb.Duration `protobuf:"bytes,8,opt,name=rotation_period,json=rotationPeriod,proto3,oneof"` +} + +func (*CryptoKey_RotationPeriod) isCryptoKey_RotationSchedule() {} + +// A [CryptoKeyVersionTemplate][google.cloud.kms.v1.CryptoKeyVersionTemplate] +// specifies the properties to use when creating a new +// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion], either manually +// with +// [CreateCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion] +// or automatically as a result of auto-rotation. +type CryptoKeyVersionTemplate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] to use when creating + // a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] based on this + // template. Immutable. Defaults to + // [SOFTWARE][google.cloud.kms.v1.ProtectionLevel.SOFTWARE]. 
+ ProtectionLevel ProtectionLevel `protobuf:"varint,1,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` + // Required. + // [Algorithm][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm] + // to use when creating a + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] based on this + // template. + // + // For backwards compatibility, GOOGLE_SYMMETRIC_ENCRYPTION is implied if both + // this field is omitted and + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] is + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. + Algorithm CryptoKeyVersion_CryptoKeyVersionAlgorithm `protobuf:"varint,3,opt,name=algorithm,proto3,enum=google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionAlgorithm" json:"algorithm,omitempty"` +} + +func (x *CryptoKeyVersionTemplate) Reset() { + *x = CryptoKeyVersionTemplate{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CryptoKeyVersionTemplate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CryptoKeyVersionTemplate) ProtoMessage() {} + +func (x *CryptoKeyVersionTemplate) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CryptoKeyVersionTemplate.ProtoReflect.Descriptor instead. +func (*CryptoKeyVersionTemplate) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{2} +} + +func (x *CryptoKeyVersionTemplate) GetProtectionLevel() ProtectionLevel { + if x != nil { + return x.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +func (x *CryptoKeyVersionTemplate) GetAlgorithm() CryptoKeyVersion_CryptoKeyVersionAlgorithm { + if x != nil { + return x.Algorithm + } + return CryptoKeyVersion_CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED +} + +// Contains an HSM-generated attestation about a key operation. For more +// information, see [Verifying attestations] +// (https://cloud.google.com/kms/docs/attest-key). +type KeyOperationAttestation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The format of the attestation data. + Format KeyOperationAttestation_AttestationFormat `protobuf:"varint,4,opt,name=format,proto3,enum=google.cloud.kms.v1.KeyOperationAttestation_AttestationFormat" json:"format,omitempty"` + // Output only. The attestation data provided by the HSM when the key + // operation was performed. + Content []byte `protobuf:"bytes,5,opt,name=content,proto3" json:"content,omitempty"` + // Output only. 
The certificate chains needed to validate the attestation + CertChains *KeyOperationAttestation_CertificateChains `protobuf:"bytes,6,opt,name=cert_chains,json=certChains,proto3" json:"cert_chains,omitempty"` +} + +func (x *KeyOperationAttestation) Reset() { + *x = KeyOperationAttestation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyOperationAttestation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyOperationAttestation) ProtoMessage() {} + +func (x *KeyOperationAttestation) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyOperationAttestation.ProtoReflect.Descriptor instead. +func (*KeyOperationAttestation) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{3} +} + +func (x *KeyOperationAttestation) GetFormat() KeyOperationAttestation_AttestationFormat { + if x != nil { + return x.Format + } + return KeyOperationAttestation_ATTESTATION_FORMAT_UNSPECIFIED +} + +func (x *KeyOperationAttestation) GetContent() []byte { + if x != nil { + return x.Content + } + return nil +} + +func (x *KeyOperationAttestation) GetCertChains() *KeyOperationAttestation_CertificateChains { + if x != nil { + return x.CertChains + } + return nil +} + +// A [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] represents an +// individual cryptographic key, and the associated key material. +// +// An +// [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED] +// version can be used for cryptographic operations. +// +// For security reasons, the raw cryptographic key material represented by a +// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] can never be viewed +// or exported. It can only be used to encrypt, decrypt, or sign data when an +// authorized user or application invokes Cloud KMS. +type CryptoKeyVersion struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The resource name for this + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in the format + // `projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The current state of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + State CryptoKeyVersion_CryptoKeyVersionState `protobuf:"varint,3,opt,name=state,proto3,enum=google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionState" json:"state,omitempty"` + // Output only. The [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] + // describing how crypto operations are performed with this + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + ProtectionLevel ProtectionLevel `protobuf:"varint,7,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` + // Output only. The + // [CryptoKeyVersionAlgorithm][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm] + // that this [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] + // supports. 
+ Algorithm CryptoKeyVersion_CryptoKeyVersionAlgorithm `protobuf:"varint,10,opt,name=algorithm,proto3,enum=google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionAlgorithm" json:"algorithm,omitempty"` + // Output only. Statement that was generated and signed by the HSM at key + // creation time. Use this statement to verify attributes of the key as stored + // on the HSM, independently of Google. Only provided for key versions with + // [protection_level][google.cloud.kms.v1.CryptoKeyVersion.protection_level] + // [HSM][google.cloud.kms.v1.ProtectionLevel.HSM]. + Attestation *KeyOperationAttestation `protobuf:"bytes,8,opt,name=attestation,proto3" json:"attestation,omitempty"` + // Output only. The time at which this + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] was created. + CreateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // Output only. The time this + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]'s key material was + // generated. + GenerateTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=generate_time,json=generateTime,proto3" json:"generate_time,omitempty"` + // Output only. The time this + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]'s key material is + // scheduled for destruction. Only present if + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] is + // [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED]. + DestroyTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=destroy_time,json=destroyTime,proto3" json:"destroy_time,omitempty"` + // Output only. The time this CryptoKeyVersion's key material was + // destroyed. Only present if + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] is + // [DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED]. + DestroyEventTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=destroy_event_time,json=destroyEventTime,proto3" json:"destroy_event_time,omitempty"` + // Output only. The name of the [ImportJob][google.cloud.kms.v1.ImportJob] + // used in the most recent import of this + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. Only present if + // the underlying key material was imported. + ImportJob string `protobuf:"bytes,14,opt,name=import_job,json=importJob,proto3" json:"import_job,omitempty"` + // Output only. The time at which this + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]'s key material was + // most recently imported. + ImportTime *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=import_time,json=importTime,proto3" json:"import_time,omitempty"` + // Output only. The root cause of the most recent import failure. Only present + // if [state][google.cloud.kms.v1.CryptoKeyVersion.state] is + // [IMPORT_FAILED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.IMPORT_FAILED]. + ImportFailureReason string `protobuf:"bytes,16,opt,name=import_failure_reason,json=importFailureReason,proto3" json:"import_failure_reason,omitempty"` + // ExternalProtectionLevelOptions stores a group of additional fields for + // configuring a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] that + // are specific to the + // [EXTERNAL][google.cloud.kms.v1.ProtectionLevel.EXTERNAL] protection level + // and [EXTERNAL_VPC][google.cloud.kms.v1.ProtectionLevel.EXTERNAL_VPC] + // protection levels. 
+ ExternalProtectionLevelOptions *ExternalProtectionLevelOptions `protobuf:"bytes,17,opt,name=external_protection_level_options,json=externalProtectionLevelOptions,proto3" json:"external_protection_level_options,omitempty"` + // Output only. Whether or not this key version is eligible for reimport, by + // being specified as a target in + // [ImportCryptoKeyVersionRequest.crypto_key_version][google.cloud.kms.v1.ImportCryptoKeyVersionRequest.crypto_key_version]. + ReimportEligible bool `protobuf:"varint,18,opt,name=reimport_eligible,json=reimportEligible,proto3" json:"reimport_eligible,omitempty"` +} + +func (x *CryptoKeyVersion) Reset() { + *x = CryptoKeyVersion{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CryptoKeyVersion) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CryptoKeyVersion) ProtoMessage() {} + +func (x *CryptoKeyVersion) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CryptoKeyVersion.ProtoReflect.Descriptor instead. +func (*CryptoKeyVersion) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{4} +} + +func (x *CryptoKeyVersion) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CryptoKeyVersion) GetState() CryptoKeyVersion_CryptoKeyVersionState { + if x != nil { + return x.State + } + return CryptoKeyVersion_CRYPTO_KEY_VERSION_STATE_UNSPECIFIED +} + +func (x *CryptoKeyVersion) GetProtectionLevel() ProtectionLevel { + if x != nil { + return x.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +func (x *CryptoKeyVersion) GetAlgorithm() CryptoKeyVersion_CryptoKeyVersionAlgorithm { + if x != nil { + return x.Algorithm + } + return CryptoKeyVersion_CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED +} + +func (x *CryptoKeyVersion) GetAttestation() *KeyOperationAttestation { + if x != nil { + return x.Attestation + } + return nil +} + +func (x *CryptoKeyVersion) GetCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.CreateTime + } + return nil +} + +func (x *CryptoKeyVersion) GetGenerateTime() *timestamppb.Timestamp { + if x != nil { + return x.GenerateTime + } + return nil +} + +func (x *CryptoKeyVersion) GetDestroyTime() *timestamppb.Timestamp { + if x != nil { + return x.DestroyTime + } + return nil +} + +func (x *CryptoKeyVersion) GetDestroyEventTime() *timestamppb.Timestamp { + if x != nil { + return x.DestroyEventTime + } + return nil +} + +func (x *CryptoKeyVersion) GetImportJob() string { + if x != nil { + return x.ImportJob + } + return "" +} + +func (x *CryptoKeyVersion) GetImportTime() *timestamppb.Timestamp { + if x != nil { + return x.ImportTime + } + return nil +} + +func (x *CryptoKeyVersion) GetImportFailureReason() string { + if x != nil { + return x.ImportFailureReason + } + return "" +} + +func (x *CryptoKeyVersion) GetExternalProtectionLevelOptions() *ExternalProtectionLevelOptions { + if x != nil { + return x.ExternalProtectionLevelOptions + } + return nil +} + +func (x *CryptoKeyVersion) GetReimportEligible() bool { + if x != nil { + return x.ReimportEligible + } + return false +} + +// The public 
key for a given +// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. Obtained via +// [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]. +type PublicKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The public key, encoded in PEM format. For more information, see the + // [RFC 7468](https://tools.ietf.org/html/rfc7468) sections for + // [General Considerations](https://tools.ietf.org/html/rfc7468#section-2) and + // [Textual Encoding of Subject Public Key Info] + // (https://tools.ietf.org/html/rfc7468#section-13). + Pem string `protobuf:"bytes,1,opt,name=pem,proto3" json:"pem,omitempty"` + // The + // [Algorithm][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm] + // associated with this key. + Algorithm CryptoKeyVersion_CryptoKeyVersionAlgorithm `protobuf:"varint,2,opt,name=algorithm,proto3,enum=google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionAlgorithm" json:"algorithm,omitempty"` + // Integrity verification field. A CRC32C checksum of the returned + // [PublicKey.pem][google.cloud.kms.v1.PublicKey.pem]. An integrity check of + // [PublicKey.pem][google.cloud.kms.v1.PublicKey.pem] can be performed by + // computing the CRC32C checksum of + // [PublicKey.pem][google.cloud.kms.v1.PublicKey.pem] and comparing your + // results to this field. Discard the response in case of non-matching + // checksum values, and perform a limited number of retries. A persistent + // mismatch may indicate an issue in your computation of the CRC32C checksum. + // Note: This field is defined as int64 for reasons of compatibility across + // different languages. However, it is a non-negative integer, which will + // never exceed 2^32-1, and can be safely downconverted to uint32 in languages + // that support this type. + // + // NOTE: This field is in Beta. + PemCrc32C *wrapperspb.Int64Value `protobuf:"bytes,3,opt,name=pem_crc32c,json=pemCrc32c,proto3" json:"pem_crc32c,omitempty"` + // The [name][google.cloud.kms.v1.CryptoKeyVersion.name] of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] public key. + // Provided here for verification. + // + // NOTE: This field is in Beta. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // The [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] public key. + ProtectionLevel ProtectionLevel `protobuf:"varint,5,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` +} + +func (x *PublicKey) Reset() { + *x = PublicKey{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublicKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublicKey) ProtoMessage() {} + +func (x *PublicKey) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublicKey.ProtoReflect.Descriptor instead. 
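The pem_crc32c integrity check described in the field comment above translates directly into code: recompute the CRC32C (Castagnoli) checksum of the PEM and compare it to the server-reported value, discarding the response and retrying on mismatch. A minimal sketch using hash/crc32 from the standard library; verifyPemCrc32c is an illustrative helper, not an API in this patch:

package kmsverify

import (
	"fmt"
	"hash/crc32"

	kmspb "cloud.google.com/go/kms/apiv1/kmspb"
)

// verifyPemCrc32c performs the integrity check prescribed by the
// PublicKey.pem_crc32c comment: recompute the CRC32C checksum of the
// returned PEM and compare it with the server-reported value. The int64
// wrapper never exceeds 2^32-1, so the uint32 comparison is safe.
func verifyPemCrc32c(pk *kmspb.PublicKey) error {
	crc := pk.GetPemCrc32C()
	if crc == nil {
		return fmt.Errorf("response carries no pem_crc32c to verify")
	}
	sum := crc32.Checksum([]byte(pk.GetPem()), crc32.MakeTable(crc32.Castagnoli))
	if int64(sum) != crc.GetValue() {
		return fmt.Errorf("pem_crc32c mismatch: computed %d, reported %d; discard and retry", sum, crc.GetValue())
	}
	return nil
}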
+func (*PublicKey) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{5} +} + +func (x *PublicKey) GetPem() string { + if x != nil { + return x.Pem + } + return "" +} + +func (x *PublicKey) GetAlgorithm() CryptoKeyVersion_CryptoKeyVersionAlgorithm { + if x != nil { + return x.Algorithm + } + return CryptoKeyVersion_CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED +} + +func (x *PublicKey) GetPemCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.PemCrc32C + } + return nil +} + +func (x *PublicKey) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *PublicKey) GetProtectionLevel() ProtectionLevel { + if x != nil { + return x.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +// An [ImportJob][google.cloud.kms.v1.ImportJob] can be used to create +// [CryptoKeys][google.cloud.kms.v1.CryptoKey] and +// [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] using pre-existing +// key material, generated outside of Cloud KMS. +// +// When an [ImportJob][google.cloud.kms.v1.ImportJob] is created, Cloud KMS will +// generate a "wrapping key", which is a public/private key pair. You use the +// wrapping key to encrypt (also known as wrap) the pre-existing key material to +// protect it during the import process. The nature of the wrapping key depends +// on the choice of +// [import_method][google.cloud.kms.v1.ImportJob.import_method]. When the +// wrapping key generation is complete, the +// [state][google.cloud.kms.v1.ImportJob.state] will be set to +// [ACTIVE][google.cloud.kms.v1.ImportJob.ImportJobState.ACTIVE] and the +// [public_key][google.cloud.kms.v1.ImportJob.public_key] can be fetched. The +// fetched public key can then be used to wrap your pre-existing key material. +// +// Once the key material is wrapped, it can be imported into a new +// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in an existing +// [CryptoKey][google.cloud.kms.v1.CryptoKey] by calling +// [ImportCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.ImportCryptoKeyVersion]. +// Multiple [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] can be +// imported with a single [ImportJob][google.cloud.kms.v1.ImportJob]. Cloud KMS +// uses the private key portion of the wrapping key to unwrap the key material. +// Only Cloud KMS has access to the private key. +// +// An [ImportJob][google.cloud.kms.v1.ImportJob] expires 3 days after it is +// created. Once expired, Cloud KMS will no longer be able to import or unwrap +// any key material that was wrapped with the +// [ImportJob][google.cloud.kms.v1.ImportJob]'s public key. +// +// For more information, see +// [Importing a key](https://cloud.google.com/kms/docs/importing-a-key). +type ImportJob struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The resource name for this + // [ImportJob][google.cloud.kms.v1.ImportJob] in the format + // `projects/*/locations/*/keyRings/*/importJobs/*`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. Immutable. The wrapping method to be used for incoming key + // material. + ImportMethod ImportJob_ImportMethod `protobuf:"varint,2,opt,name=import_method,json=importMethod,proto3,enum=google.cloud.kms.v1.ImportJob_ImportMethod" json:"import_method,omitempty"` + // Required. Immutable. The protection level of the + // [ImportJob][google.cloud.kms.v1.ImportJob]. 
This must match the + // [protection_level][google.cloud.kms.v1.CryptoKeyVersionTemplate.protection_level] + // of the [version_template][google.cloud.kms.v1.CryptoKey.version_template] + // on the [CryptoKey][google.cloud.kms.v1.CryptoKey] you attempt to import + // into. + ProtectionLevel ProtectionLevel `protobuf:"varint,9,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` + // Output only. The time at which this + // [ImportJob][google.cloud.kms.v1.ImportJob] was created. + CreateTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // Output only. The time this [ImportJob][google.cloud.kms.v1.ImportJob]'s key + // material was generated. + GenerateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=generate_time,json=generateTime,proto3" json:"generate_time,omitempty"` + // Output only. The time at which this + // [ImportJob][google.cloud.kms.v1.ImportJob] is scheduled for expiration and + // can no longer be used to import key material. + ExpireTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` + // Output only. The time this [ImportJob][google.cloud.kms.v1.ImportJob] + // expired. Only present if [state][google.cloud.kms.v1.ImportJob.state] is + // [EXPIRED][google.cloud.kms.v1.ImportJob.ImportJobState.EXPIRED]. + ExpireEventTime *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=expire_event_time,json=expireEventTime,proto3" json:"expire_event_time,omitempty"` + // Output only. The current state of the + // [ImportJob][google.cloud.kms.v1.ImportJob], indicating if it can be used. + State ImportJob_ImportJobState `protobuf:"varint,6,opt,name=state,proto3,enum=google.cloud.kms.v1.ImportJob_ImportJobState" json:"state,omitempty"` + // Output only. The public key with which to wrap key material prior to + // import. Only returned if [state][google.cloud.kms.v1.ImportJob.state] is + // [ACTIVE][google.cloud.kms.v1.ImportJob.ImportJobState.ACTIVE]. + PublicKey *ImportJob_WrappingPublicKey `protobuf:"bytes,7,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` + // Output only. Statement that was generated and signed by the key creator + // (for example, an HSM) at key creation time. Use this statement to verify + // attributes of the key as stored on the HSM, independently of Google. + // Only present if the chosen + // [ImportMethod][google.cloud.kms.v1.ImportJob.ImportMethod] is one with a + // protection level of [HSM][google.cloud.kms.v1.ProtectionLevel.HSM]. + Attestation *KeyOperationAttestation `protobuf:"bytes,8,opt,name=attestation,proto3" json:"attestation,omitempty"` +} + +func (x *ImportJob) Reset() { + *x = ImportJob{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportJob) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportJob) ProtoMessage() {} + +func (x *ImportJob) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportJob.ProtoReflect.Descriptor instead. 
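The ImportJob lifecycle documented above (wait for the job to reach ACTIVE, fetch the wrapping public key, wrap the key material, then import) implies a simple state check before the wrapping key can be used. A minimal sketch under the assumption that callers poll job state elsewhere; wrappingKeyPEM is an illustrative helper, not part of this patch:

package kmsimport

import (
	"fmt"

	kmspb "cloud.google.com/go/kms/apiv1/kmspb"
)

// wrappingKeyPEM returns the PEM-encoded wrapping public key of an
// ImportJob, enforcing the state machine in the comments above:
// public_key is only returned once the job is ACTIVE, and the job
// expires three days after creation.
func wrappingKeyPEM(job *kmspb.ImportJob) (string, error) {
	if job.GetState() != kmspb.ImportJob_ACTIVE {
		return "", fmt.Errorf("import job %q is %v, not ACTIVE", job.GetName(), job.GetState())
	}
	if pem := job.GetPublicKey().GetPem(); pem != "" {
		return pem, nil
	}
	return "", fmt.Errorf("import job %q has no wrapping public key", job.GetName())
}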
+func (*ImportJob) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{6} +} + +func (x *ImportJob) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ImportJob) GetImportMethod() ImportJob_ImportMethod { + if x != nil { + return x.ImportMethod + } + return ImportJob_IMPORT_METHOD_UNSPECIFIED +} + +func (x *ImportJob) GetProtectionLevel() ProtectionLevel { + if x != nil { + return x.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +func (x *ImportJob) GetCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.CreateTime + } + return nil +} + +func (x *ImportJob) GetGenerateTime() *timestamppb.Timestamp { + if x != nil { + return x.GenerateTime + } + return nil +} + +func (x *ImportJob) GetExpireTime() *timestamppb.Timestamp { + if x != nil { + return x.ExpireTime + } + return nil +} + +func (x *ImportJob) GetExpireEventTime() *timestamppb.Timestamp { + if x != nil { + return x.ExpireEventTime + } + return nil +} + +func (x *ImportJob) GetState() ImportJob_ImportJobState { + if x != nil { + return x.State + } + return ImportJob_IMPORT_JOB_STATE_UNSPECIFIED +} + +func (x *ImportJob) GetPublicKey() *ImportJob_WrappingPublicKey { + if x != nil { + return x.PublicKey + } + return nil +} + +func (x *ImportJob) GetAttestation() *KeyOperationAttestation { + if x != nil { + return x.Attestation + } + return nil +} + +// ExternalProtectionLevelOptions stores a group of additional fields for +// configuring a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] that +// are specific to the [EXTERNAL][google.cloud.kms.v1.ProtectionLevel.EXTERNAL] +// protection level and +// [EXTERNAL_VPC][google.cloud.kms.v1.ProtectionLevel.EXTERNAL_VPC] protection +// levels. +type ExternalProtectionLevelOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The URI for an external resource that this + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] represents. + ExternalKeyUri string `protobuf:"bytes,1,opt,name=external_key_uri,json=externalKeyUri,proto3" json:"external_key_uri,omitempty"` + // The path to the external key material on the EKM when using + // [EkmConnection][google.cloud.kms.v1.EkmConnection] e.g., "v0/my/key". Set + // this field instead of external_key_uri when using an + // [EkmConnection][google.cloud.kms.v1.EkmConnection]. + EkmConnectionKeyPath string `protobuf:"bytes,2,opt,name=ekm_connection_key_path,json=ekmConnectionKeyPath,proto3" json:"ekm_connection_key_path,omitempty"` +} + +func (x *ExternalProtectionLevelOptions) Reset() { + *x = ExternalProtectionLevelOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExternalProtectionLevelOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExternalProtectionLevelOptions) ProtoMessage() {} + +func (x *ExternalProtectionLevelOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExternalProtectionLevelOptions.ProtoReflect.Descriptor instead. 
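The ExternalProtectionLevelOptions comments below suggest that external_key_uri and ekm_connection_key_path are alternatives: the key path is set when an EkmConnection is in use (EXTERNAL_VPC), the URI otherwise (EXTERNAL). A minimal sketch of resolving whichever reference is present; externalKeyRef is an illustrative helper, not part of this patch:

package kmsexternal

import (
	"fmt"

	kmspb "cloud.google.com/go/kms/apiv1/kmspb"
)

// externalKeyRef returns the configured external key reference,
// preferring ekm_connection_key_path (EkmConnection / EXTERNAL_VPC)
// and falling back to external_key_uri (EXTERNAL), mirroring the
// "set this field instead of external_key_uri" guidance below.
func externalKeyRef(opts *kmspb.ExternalProtectionLevelOptions) (string, error) {
	switch {
	case opts.GetEkmConnectionKeyPath() != "":
		return opts.GetEkmConnectionKeyPath(), nil
	case opts.GetExternalKeyUri() != "":
		return opts.GetExternalKeyUri(), nil
	default:
		return "", fmt.Errorf("no external key reference configured")
	}
}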
+func (*ExternalProtectionLevelOptions) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{7} +} + +func (x *ExternalProtectionLevelOptions) GetExternalKeyUri() string { + if x != nil { + return x.ExternalKeyUri + } + return "" +} + +func (x *ExternalProtectionLevelOptions) GetEkmConnectionKeyPath() string { + if x != nil { + return x.EkmConnectionKeyPath + } + return "" +} + +// Certificate chains needed to verify the attestation. +// Certificates in chains are PEM-encoded and are ordered based on +// https://tools.ietf.org/html/rfc5246#section-7.4.2. +type KeyOperationAttestation_CertificateChains struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Cavium certificate chain corresponding to the attestation. + CaviumCerts []string `protobuf:"bytes,1,rep,name=cavium_certs,json=caviumCerts,proto3" json:"cavium_certs,omitempty"` + // Google card certificate chain corresponding to the attestation. + GoogleCardCerts []string `protobuf:"bytes,2,rep,name=google_card_certs,json=googleCardCerts,proto3" json:"google_card_certs,omitempty"` + // Google partition certificate chain corresponding to the attestation. + GooglePartitionCerts []string `protobuf:"bytes,3,rep,name=google_partition_certs,json=googlePartitionCerts,proto3" json:"google_partition_certs,omitempty"` +} + +func (x *KeyOperationAttestation_CertificateChains) Reset() { + *x = KeyOperationAttestation_CertificateChains{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyOperationAttestation_CertificateChains) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyOperationAttestation_CertificateChains) ProtoMessage() {} + +func (x *KeyOperationAttestation_CertificateChains) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyOperationAttestation_CertificateChains.ProtoReflect.Descriptor instead. +func (*KeyOperationAttestation_CertificateChains) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *KeyOperationAttestation_CertificateChains) GetCaviumCerts() []string { + if x != nil { + return x.CaviumCerts + } + return nil +} + +func (x *KeyOperationAttestation_CertificateChains) GetGoogleCardCerts() []string { + if x != nil { + return x.GoogleCardCerts + } + return nil +} + +func (x *KeyOperationAttestation_CertificateChains) GetGooglePartitionCerts() []string { + if x != nil { + return x.GooglePartitionCerts + } + return nil +} + +// The public key component of the wrapping key. For details of the type of +// key this public key corresponds to, see the +// [ImportMethod][google.cloud.kms.v1.ImportJob.ImportMethod]. +type ImportJob_WrappingPublicKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The public key, encoded in PEM format. 
For more information, see the [RFC + // 7468](https://tools.ietf.org/html/rfc7468) sections for [General + // Considerations](https://tools.ietf.org/html/rfc7468#section-2) and + // [Textual Encoding of Subject Public Key Info] + // (https://tools.ietf.org/html/rfc7468#section-13). + Pem string `protobuf:"bytes,1,opt,name=pem,proto3" json:"pem,omitempty"` +} + +func (x *ImportJob_WrappingPublicKey) Reset() { + *x = ImportJob_WrappingPublicKey{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportJob_WrappingPublicKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportJob_WrappingPublicKey) ProtoMessage() {} + +func (x *ImportJob_WrappingPublicKey) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportJob_WrappingPublicKey.ProtoReflect.Descriptor instead. +func (*ImportJob_WrappingPublicKey) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *ImportJob_WrappingPublicKey) GetPem() string { + if x != nil { + return x.Pem + } + return "" +} + +var File_google_cloud_kms_v1_resources_proto protoreflect.FileDescriptor + +var file_google_cloud_kms_v1_resources_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6b, + 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, + 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc7, 0x01, 0x0a, 0x07, 0x4b, 0x65, 0x79, 0x52, + 0x69, 0x6e, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 
0x69, 0x6d, 0x65, 0x3a, 0x61, + 0xea, 0x41, 0x5e, 0x0a, 0x1f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4b, 0x65, 0x79, + 0x52, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, + 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, + 0x7d, 0x22, 0xb8, 0x08, 0x0a, 0x09, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, + 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x6d, + 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x4e, + 0x0a, 0x07, 0x70, 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, + 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x2e, + 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x50, 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, + 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x07, 0x70, 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x40, + 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x48, 0x0a, 0x12, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x6e, 0x65, 0x78, 0x74, 0x52, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x0f, 0x72, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, + 0x52, 0x0e, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, + 0x12, 0x58, 0x0a, 0x10, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x65, 0x6d, 0x70, + 0x6c, 0x61, 0x74, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x0f, 0x76, 0x65, 0x72, 0x73, 
0x69, + 0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x12, 0x42, 0x0a, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x24, + 0x0a, 0x0b, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, + 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x5c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x5f, + 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, + 0x79, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x12, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, + 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x03, 0x0a, 0x01, 0x2a, 0x52, 0x10, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x81, 0x01, 0x0a, 0x10, 0x43, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x50, 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x22, 0x0a, 0x1e, 0x43, + 0x52, 0x59, 0x50, 0x54, 0x4f, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x50, 0x55, 0x52, 0x50, 0x4f, 0x53, + 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x13, 0x0a, 0x0f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, + 0x50, 0x54, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x41, 0x53, 0x59, 0x4d, 0x4d, 0x45, 0x54, 0x52, + 0x49, 0x43, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x10, 0x05, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x53, 0x59, + 0x4d, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, + 0x06, 0x12, 0x07, 0x0a, 0x03, 0x4d, 0x41, 0x43, 0x10, 0x09, 0x3a, 0x7b, 0xea, 0x41, 0x78, 0x0a, + 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, + 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, + 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x5f, 
0x6b, 0x65, 0x79, 0x7d, 0x42, 0x13, 0x0a, 0x11, 0x72, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x22, 0xcf, 0x01, 0x0a, + 0x18, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, + 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x62, 0x0a, 0x09, 0x61, 0x6c, + 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x22, 0x83, + 0x04, 0x0a, 0x17, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, + 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x06, 0x66, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x74, 0x74, + 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x07, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x64, 0x0a, 0x0b, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63, + 0x68, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x74, + 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x0a, 0x63, 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x1a, 0x98, 0x01, 0x0a, + 0x11, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, + 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x61, 0x76, 0x69, 0x75, 0x6d, 0x5f, 0x63, 0x65, 0x72, + 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x61, 0x76, 0x69, 0x75, 0x6d, + 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, + 0x63, 0x61, 0x72, 0x64, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x43, 0x61, 0x72, 0x64, 0x43, 0x65, 0x72, 0x74, + 0x73, 0x12, 0x34, 0x0a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x65, 0x72, 0x74, 0x73, 0x22, 0x6b, 0x0a, 0x11, 0x41, 0x74, 0x74, 0x65, 0x73, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x22, 0x0a, 0x1e, + 0x41, 0x54, 0x54, 0x45, 0x53, 0x54, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, + 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x18, 0x0a, 0x14, 0x43, 0x41, 0x56, 0x49, 0x55, 0x4d, 0x5f, 0x56, 0x31, 0x5f, 0x43, 0x4f, + 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, 0x45, 0x44, 0x10, 0x03, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x41, + 0x56, 0x49, 0x55, 0x4d, 0x5f, 0x56, 0x32, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, + 0x45, 0x44, 0x10, 0x04, 0x22, 0x9f, 0x12, 0x0a, 0x10, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, + 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, + 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x54, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, + 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x62, 0x0a, 0x09, 0x61, + 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, + 0x53, 0x0a, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 
0x61, 0x74, 0x65, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x0d, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x0c, + 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x4d, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x5f, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, 0x64, + 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x22, 0x0a, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, + 0x4a, 0x6f, 0x62, 0x12, 0x40, 0x0a, 0x0b, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72, + 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x15, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, + 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x69, 0x6d, 0x70, 0x6f, 0x72, + 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x7e, + 0x0a, 0x21, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x1e, + 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, + 0x0a, 0x11, 0x72, 0x65, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x65, 0x6c, 0x69, 0x67, 
0x69, + 0x62, 0x6c, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, + 0x72, 0x65, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x45, 0x6c, 0x69, 0x67, 0x69, 0x62, 0x6c, 0x65, + 0x22, 0xe7, 0x06, 0x0a, 0x19, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x2c, + 0x0a, 0x28, 0x43, 0x52, 0x59, 0x50, 0x54, 0x4f, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x45, 0x52, + 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x4c, 0x47, 0x4f, 0x52, 0x49, 0x54, 0x48, 0x4d, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1f, 0x0a, 0x1b, + 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, 0x53, 0x59, 0x4d, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, + 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x1c, 0x0a, + 0x18, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x32, 0x30, + 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, 0x12, 0x1c, 0x0a, 0x18, 0x52, + 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x33, 0x30, 0x37, 0x32, + 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x53, 0x41, + 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x53, 0x41, 0x5f, 0x53, + 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, + 0x35, 0x31, 0x32, 0x10, 0x0f, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, + 0x4e, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x05, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, + 0x4e, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x06, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, + 0x4e, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, + 0x4e, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, + 0x35, 0x31, 0x32, 0x10, 0x10, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, + 0x4e, 0x5f, 0x52, 0x41, 0x57, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x32, 0x30, 0x34, 0x38, + 0x10, 0x1c, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, + 0x41, 0x57, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x10, 0x1d, 0x12, + 0x1b, 0x0a, 0x17, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x41, 0x57, 0x5f, + 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x10, 0x1e, 0x12, 0x20, 0x0a, 0x1c, + 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, + 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x08, 0x12, 0x20, + 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, + 0x45, 0x50, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x09, + 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, + 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, + 0x10, 0x0a, 0x12, 
0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, + 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x35, + 0x31, 0x32, 0x10, 0x11, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, + 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, + 0x41, 0x31, 0x10, 0x25, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, + 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, + 0x41, 0x31, 0x10, 0x26, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, + 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, + 0x41, 0x31, 0x10, 0x27, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x43, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, + 0x50, 0x32, 0x35, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0c, 0x12, 0x17, 0x0a, + 0x13, 0x45, 0x43, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x33, 0x38, 0x34, 0x5f, 0x53, 0x48, + 0x41, 0x33, 0x38, 0x34, 0x10, 0x0d, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x43, 0x5f, 0x53, 0x49, 0x47, + 0x4e, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x4b, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, + 0x35, 0x36, 0x10, 0x1f, 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x20, 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, + 0x41, 0x31, 0x10, 0x21, 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, + 0x33, 0x38, 0x34, 0x10, 0x22, 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, + 0x41, 0x35, 0x31, 0x32, 0x10, 0x23, 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x32, 0x34, 0x10, 0x24, 0x12, 0x21, 0x0a, 0x1d, 0x45, 0x58, 0x54, 0x45, 0x52, + 0x4e, 0x41, 0x4c, 0x5f, 0x53, 0x59, 0x4d, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x45, 0x4e, + 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x12, 0x22, 0xc1, 0x01, 0x0a, 0x15, 0x43, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x24, 0x43, 0x52, 0x59, 0x50, 0x54, 0x4f, 0x5f, 0x4b, + 0x45, 0x59, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, + 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, + 0x0a, 0x12, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, + 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x4e, 0x41, 0x42, 0x4c, 0x45, + 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, + 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x53, 0x54, 0x52, 0x4f, 0x59, 0x45, 0x44, 0x10, 0x03, + 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x53, 0x54, 0x52, 0x4f, 0x59, 0x5f, 0x53, 0x43, 0x48, 0x45, + 0x44, 0x55, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x12, 0x0a, 0x0e, 0x50, 0x45, 0x4e, 0x44, 0x49, + 0x4e, 0x47, 0x5f, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x49, + 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x07, 0x22, 0x49, + 0x0a, 0x14, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x56, 0x69, 0x65, 0x77, 0x12, 0x27, 0x0a, 0x23, 0x43, 0x52, 0x59, 0x50, 0x54, 0x4f, + 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x56, 0x49, 0x45, + 0x57, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 
0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x01, 0x3a, 0xaa, 0x01, 0xea, 0x41, 0xa6, 0x01, + 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x7a, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, + 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x2f, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x22, 0xce, 0x03, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69, + 0x63, 0x4b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x65, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x70, 0x65, 0x6d, 0x12, 0x5d, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, + 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, + 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x65, 0x6d, 0x5f, 0x63, 0x72, 0x63, + 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, + 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x65, 0x6d, 0x43, 0x72, 0x63, 0x33, 0x32, + 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, + 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0xae, 0x01, 0xea, 0x41, 0xaa, 0x01, 0x0a, 0x21, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, + 0x12, 0x84, 0x01, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, + 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 
0x6e, 0x67, 0x7d, 0x2f, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x70, 0x75, + 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0xd4, 0x09, 0x0a, 0x09, 0x49, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x58, + 0x0a, 0x0d, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0c, 0x69, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x57, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, + 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x0d, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x65, 0x78, 0x70, + 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x11, 0x65, + 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x48, 0x0a, 0x05, 0x73, 0x74, 0x61, 
0x74, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, + 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, + 0x62, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x54, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x2e, 0x57, 0x72, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x50, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x0a, 0x0b, 0x61, 0x74, 0x74, 0x65, + 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x25, 0x0a, + 0x11, 0x57, 0x72, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, + 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x65, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x70, 0x65, 0x6d, 0x22, 0xe5, 0x01, 0x0a, 0x0c, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1d, 0x0a, 0x19, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, + 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, + 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, + 0x35, 0x36, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, + 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, + 0x35, 0x36, 0x10, 0x02, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, + 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x5f, 0x41, 0x45, 0x53, + 0x5f, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, + 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x5f, 0x41, + 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x53, 0x41, 0x5f, + 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, + 0x10, 0x05, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, + 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x22, 0x63, 0x0a, 0x0e, + 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x20, + 0x0a, 0x1c, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x4a, 0x4f, 0x42, 0x5f, 0x53, 0x54, 0x41, + 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x16, 0x0a, 0x12, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x47, 0x45, 0x4e, 0x45, + 0x52, 0x41, 0x54, 
0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, + 0x56, 0x45, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x58, 0x50, 0x49, 0x52, 0x45, 0x44, 0x10, + 0x03, 0x3a, 0x7b, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, + 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, + 0x73, 0x2f, 0x7b, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x7d, 0x22, 0x81, + 0x01, 0x0a, 0x1e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6b, 0x65, + 0x79, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x55, 0x72, 0x69, 0x12, 0x35, 0x0a, 0x17, 0x65, + 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, + 0x79, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x65, 0x6b, + 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x50, 0x61, + 0x74, 0x68, 0x2a, 0x6a, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x50, 0x52, 0x4f, 0x54, 0x45, 0x43, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x4f, 0x46, 0x54, 0x57, + 0x41, 0x52, 0x45, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x48, 0x53, 0x4d, 0x10, 0x02, 0x12, 0x0c, + 0x0a, 0x08, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, + 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x56, 0x50, 0x43, 0x10, 0x04, 0x42, 0x95, + 0x01, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x4b, 0x6d, 0x73, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x36, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6b, 0x6d, 0x73, + 0x2f, 0x76, 0x31, 0x3b, 0x6b, 0x6d, 0x73, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, + 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, + 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_cloud_kms_v1_resources_proto_rawDescOnce sync.Once + file_google_cloud_kms_v1_resources_proto_rawDescData = 
file_google_cloud_kms_v1_resources_proto_rawDesc +) + +func file_google_cloud_kms_v1_resources_proto_rawDescGZIP() []byte { + file_google_cloud_kms_v1_resources_proto_rawDescOnce.Do(func() { + file_google_cloud_kms_v1_resources_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_cloud_kms_v1_resources_proto_rawDescData) + }) + return file_google_cloud_kms_v1_resources_proto_rawDescData +} + +var file_google_cloud_kms_v1_resources_proto_enumTypes = make([]protoimpl.EnumInfo, 8) +var file_google_cloud_kms_v1_resources_proto_msgTypes = make([]protoimpl.MessageInfo, 11) +var file_google_cloud_kms_v1_resources_proto_goTypes = []interface{}{ + (ProtectionLevel)(0), // 0: google.cloud.kms.v1.ProtectionLevel + (CryptoKey_CryptoKeyPurpose)(0), // 1: google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose + (KeyOperationAttestation_AttestationFormat)(0), // 2: google.cloud.kms.v1.KeyOperationAttestation.AttestationFormat + (CryptoKeyVersion_CryptoKeyVersionAlgorithm)(0), // 3: google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm + (CryptoKeyVersion_CryptoKeyVersionState)(0), // 4: google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState + (CryptoKeyVersion_CryptoKeyVersionView)(0), // 5: google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionView + (ImportJob_ImportMethod)(0), // 6: google.cloud.kms.v1.ImportJob.ImportMethod + (ImportJob_ImportJobState)(0), // 7: google.cloud.kms.v1.ImportJob.ImportJobState + (*KeyRing)(nil), // 8: google.cloud.kms.v1.KeyRing + (*CryptoKey)(nil), // 9: google.cloud.kms.v1.CryptoKey + (*CryptoKeyVersionTemplate)(nil), // 10: google.cloud.kms.v1.CryptoKeyVersionTemplate + (*KeyOperationAttestation)(nil), // 11: google.cloud.kms.v1.KeyOperationAttestation + (*CryptoKeyVersion)(nil), // 12: google.cloud.kms.v1.CryptoKeyVersion + (*PublicKey)(nil), // 13: google.cloud.kms.v1.PublicKey + (*ImportJob)(nil), // 14: google.cloud.kms.v1.ImportJob + (*ExternalProtectionLevelOptions)(nil), // 15: google.cloud.kms.v1.ExternalProtectionLevelOptions + nil, // 16: google.cloud.kms.v1.CryptoKey.LabelsEntry + (*KeyOperationAttestation_CertificateChains)(nil), // 17: google.cloud.kms.v1.KeyOperationAttestation.CertificateChains + (*ImportJob_WrappingPublicKey)(nil), // 18: google.cloud.kms.v1.ImportJob.WrappingPublicKey + (*timestamppb.Timestamp)(nil), // 19: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 20: google.protobuf.Duration + (*wrapperspb.Int64Value)(nil), // 21: google.protobuf.Int64Value +} +var file_google_cloud_kms_v1_resources_proto_depIdxs = []int32{ + 19, // 0: google.cloud.kms.v1.KeyRing.create_time:type_name -> google.protobuf.Timestamp + 12, // 1: google.cloud.kms.v1.CryptoKey.primary:type_name -> google.cloud.kms.v1.CryptoKeyVersion + 1, // 2: google.cloud.kms.v1.CryptoKey.purpose:type_name -> google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose + 19, // 3: google.cloud.kms.v1.CryptoKey.create_time:type_name -> google.protobuf.Timestamp + 19, // 4: google.cloud.kms.v1.CryptoKey.next_rotation_time:type_name -> google.protobuf.Timestamp + 20, // 5: google.cloud.kms.v1.CryptoKey.rotation_period:type_name -> google.protobuf.Duration + 10, // 6: google.cloud.kms.v1.CryptoKey.version_template:type_name -> google.cloud.kms.v1.CryptoKeyVersionTemplate + 16, // 7: google.cloud.kms.v1.CryptoKey.labels:type_name -> google.cloud.kms.v1.CryptoKey.LabelsEntry + 20, // 8: google.cloud.kms.v1.CryptoKey.destroy_scheduled_duration:type_name -> google.protobuf.Duration + 0, // 9: google.cloud.kms.v1.CryptoKeyVersionTemplate.protection_level:type_name -> 
google.cloud.kms.v1.ProtectionLevel + 3, // 10: google.cloud.kms.v1.CryptoKeyVersionTemplate.algorithm:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm + 2, // 11: google.cloud.kms.v1.KeyOperationAttestation.format:type_name -> google.cloud.kms.v1.KeyOperationAttestation.AttestationFormat + 17, // 12: google.cloud.kms.v1.KeyOperationAttestation.cert_chains:type_name -> google.cloud.kms.v1.KeyOperationAttestation.CertificateChains + 4, // 13: google.cloud.kms.v1.CryptoKeyVersion.state:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState + 0, // 14: google.cloud.kms.v1.CryptoKeyVersion.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel + 3, // 15: google.cloud.kms.v1.CryptoKeyVersion.algorithm:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm + 11, // 16: google.cloud.kms.v1.CryptoKeyVersion.attestation:type_name -> google.cloud.kms.v1.KeyOperationAttestation + 19, // 17: google.cloud.kms.v1.CryptoKeyVersion.create_time:type_name -> google.protobuf.Timestamp + 19, // 18: google.cloud.kms.v1.CryptoKeyVersion.generate_time:type_name -> google.protobuf.Timestamp + 19, // 19: google.cloud.kms.v1.CryptoKeyVersion.destroy_time:type_name -> google.protobuf.Timestamp + 19, // 20: google.cloud.kms.v1.CryptoKeyVersion.destroy_event_time:type_name -> google.protobuf.Timestamp + 19, // 21: google.cloud.kms.v1.CryptoKeyVersion.import_time:type_name -> google.protobuf.Timestamp + 15, // 22: google.cloud.kms.v1.CryptoKeyVersion.external_protection_level_options:type_name -> google.cloud.kms.v1.ExternalProtectionLevelOptions + 3, // 23: google.cloud.kms.v1.PublicKey.algorithm:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm + 21, // 24: google.cloud.kms.v1.PublicKey.pem_crc32c:type_name -> google.protobuf.Int64Value + 0, // 25: google.cloud.kms.v1.PublicKey.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel + 6, // 26: google.cloud.kms.v1.ImportJob.import_method:type_name -> google.cloud.kms.v1.ImportJob.ImportMethod + 0, // 27: google.cloud.kms.v1.ImportJob.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel + 19, // 28: google.cloud.kms.v1.ImportJob.create_time:type_name -> google.protobuf.Timestamp + 19, // 29: google.cloud.kms.v1.ImportJob.generate_time:type_name -> google.protobuf.Timestamp + 19, // 30: google.cloud.kms.v1.ImportJob.expire_time:type_name -> google.protobuf.Timestamp + 19, // 31: google.cloud.kms.v1.ImportJob.expire_event_time:type_name -> google.protobuf.Timestamp + 7, // 32: google.cloud.kms.v1.ImportJob.state:type_name -> google.cloud.kms.v1.ImportJob.ImportJobState + 18, // 33: google.cloud.kms.v1.ImportJob.public_key:type_name -> google.cloud.kms.v1.ImportJob.WrappingPublicKey + 11, // 34: google.cloud.kms.v1.ImportJob.attestation:type_name -> google.cloud.kms.v1.KeyOperationAttestation + 35, // [35:35] is the sub-list for method output_type + 35, // [35:35] is the sub-list for method input_type + 35, // [35:35] is the sub-list for extension type_name + 35, // [35:35] is the sub-list for extension extendee + 0, // [0:35] is the sub-list for field type_name +} + +func init() { file_google_cloud_kms_v1_resources_proto_init() } +func file_google_cloud_kms_v1_resources_proto_init() { + if File_google_cloud_kms_v1_resources_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_cloud_kms_v1_resources_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyRing); i { + case 0: + return &v.state 
+ case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CryptoKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CryptoKeyVersionTemplate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyOperationAttestation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CryptoKeyVersion); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublicKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportJob); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExternalProtectionLevelOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyOperationAttestation_CertificateChains); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportJob_WrappingPublicKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*CryptoKey_RotationPeriod)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_cloud_kms_v1_resources_proto_rawDesc, + NumEnums: 8, + NumMessages: 11, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_cloud_kms_v1_resources_proto_goTypes, + DependencyIndexes: file_google_cloud_kms_v1_resources_proto_depIdxs, + EnumInfos: file_google_cloud_kms_v1_resources_proto_enumTypes, + MessageInfos: file_google_cloud_kms_v1_resources_proto_msgTypes, + }.Build() + File_google_cloud_kms_v1_resources_proto = out.File + file_google_cloud_kms_v1_resources_proto_rawDesc = nil + file_google_cloud_kms_v1_resources_proto_goTypes = nil + file_google_cloud_kms_v1_resources_proto_depIdxs = nil +} diff --git 
a/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go new file mode 100644 index 00000000000..d345c167dab --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go @@ -0,0 +1,6284 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.9 +// source: google/cloud/kms/v1/service.proto + +package kmspb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Request message for +// [KeyManagementService.ListKeyRings][google.cloud.kms.v1.KeyManagementService.ListKeyRings]. +type ListKeyRingsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the location associated with the + // [KeyRings][google.cloud.kms.v1.KeyRing], in the format + // `projects/*/locations/*`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. Optional limit on the number of + // [KeyRings][google.cloud.kms.v1.KeyRing] to include in the response. Further + // [KeyRings][google.cloud.kms.v1.KeyRing] can subsequently be obtained by + // including the + // [ListKeyRingsResponse.next_page_token][google.cloud.kms.v1.ListKeyRingsResponse.next_page_token] + // in a subsequent request. If unspecified, the server will pick an + // appropriate default. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional. Optional pagination token, returned earlier via + // [ListKeyRingsResponse.next_page_token][google.cloud.kms.v1.ListKeyRingsResponse.next_page_token]. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // Optional. Only include resources that match the filter in the response. For + // more information, see + // [Sorting and filtering list + // results](https://cloud.google.com/kms/docs/sorting-and-filtering). + Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` + // Optional. Specify how the results should be sorted. If not specified, the + // results will be sorted in the default order. 
For more information, see + // [Sorting and filtering list + // results](https://cloud.google.com/kms/docs/sorting-and-filtering). + OrderBy string `protobuf:"bytes,5,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` +} + +func (x *ListKeyRingsRequest) Reset() { + *x = ListKeyRingsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListKeyRingsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListKeyRingsRequest) ProtoMessage() {} + +func (x *ListKeyRingsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListKeyRingsRequest.ProtoReflect.Descriptor instead. +func (*ListKeyRingsRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{0} +} + +func (x *ListKeyRingsRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListKeyRingsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListKeyRingsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListKeyRingsRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListKeyRingsRequest) GetOrderBy() string { + if x != nil { + return x.OrderBy + } + return "" +} + +// Request message for +// [KeyManagementService.ListCryptoKeys][google.cloud.kms.v1.KeyManagementService.ListCryptoKeys]. +type ListCryptoKeysRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the [KeyRing][google.cloud.kms.v1.KeyRing] + // to list, in the format `projects/*/locations/*/keyRings/*`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. Optional limit on the number of + // [CryptoKeys][google.cloud.kms.v1.CryptoKey] to include in the response. + // Further [CryptoKeys][google.cloud.kms.v1.CryptoKey] can subsequently be + // obtained by including the + // [ListCryptoKeysResponse.next_page_token][google.cloud.kms.v1.ListCryptoKeysResponse.next_page_token] + // in a subsequent request. If unspecified, the server will pick an + // appropriate default. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional. Optional pagination token, returned earlier via + // [ListCryptoKeysResponse.next_page_token][google.cloud.kms.v1.ListCryptoKeysResponse.next_page_token]. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // The fields of the primary version to include in the response. + VersionView CryptoKeyVersion_CryptoKeyVersionView `protobuf:"varint,4,opt,name=version_view,json=versionView,proto3,enum=google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionView" json:"version_view,omitempty"` + // Optional. Only include resources that match the filter in the response. For + // more information, see + // [Sorting and filtering list + // results](https://cloud.google.com/kms/docs/sorting-and-filtering). 
+ Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + // Optional. Specify how the results should be sorted. If not specified, the + // results will be sorted in the default order. For more information, see + // [Sorting and filtering list + // results](https://cloud.google.com/kms/docs/sorting-and-filtering). + OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` +} + +func (x *ListCryptoKeysRequest) Reset() { + *x = ListCryptoKeysRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListCryptoKeysRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListCryptoKeysRequest) ProtoMessage() {} + +func (x *ListCryptoKeysRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListCryptoKeysRequest.ProtoReflect.Descriptor instead. +func (*ListCryptoKeysRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListCryptoKeysRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListCryptoKeysRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListCryptoKeysRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListCryptoKeysRequest) GetVersionView() CryptoKeyVersion_CryptoKeyVersionView { + if x != nil { + return x.VersionView + } + return CryptoKeyVersion_CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED +} + +func (x *ListCryptoKeysRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListCryptoKeysRequest) GetOrderBy() string { + if x != nil { + return x.OrderBy + } + return "" +} + +// Request message for +// [KeyManagementService.ListCryptoKeyVersions][google.cloud.kms.v1.KeyManagementService.ListCryptoKeyVersions]. +type ListCryptoKeyVersionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the + // [CryptoKey][google.cloud.kms.v1.CryptoKey] to list, in the format + // `projects/*/locations/*/keyRings/*/cryptoKeys/*`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. Optional limit on the number of + // [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] to include in the + // response. Further [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] + // can subsequently be obtained by including the + // [ListCryptoKeyVersionsResponse.next_page_token][google.cloud.kms.v1.ListCryptoKeyVersionsResponse.next_page_token] + // in a subsequent request. If unspecified, the server will pick an + // appropriate default. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional. Optional pagination token, returned earlier via + // [ListCryptoKeyVersionsResponse.next_page_token][google.cloud.kms.v1.ListCryptoKeyVersionsResponse.next_page_token]. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // The fields to include in the response. + View CryptoKeyVersion_CryptoKeyVersionView `protobuf:"varint,4,opt,name=view,proto3,enum=google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionView" json:"view,omitempty"` + // Optional. Only include resources that match the filter in the response. For + // more information, see + // [Sorting and filtering list + // results](https://cloud.google.com/kms/docs/sorting-and-filtering). + Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + // Optional. Specify how the results should be sorted. If not specified, the + // results will be sorted in the default order. For more information, see + // [Sorting and filtering list + // results](https://cloud.google.com/kms/docs/sorting-and-filtering). + OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` +} + +func (x *ListCryptoKeyVersionsRequest) Reset() { + *x = ListCryptoKeyVersionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListCryptoKeyVersionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListCryptoKeyVersionsRequest) ProtoMessage() {} + +func (x *ListCryptoKeyVersionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListCryptoKeyVersionsRequest.ProtoReflect.Descriptor instead. +func (*ListCryptoKeyVersionsRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{2} +} + +func (x *ListCryptoKeyVersionsRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListCryptoKeyVersionsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListCryptoKeyVersionsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListCryptoKeyVersionsRequest) GetView() CryptoKeyVersion_CryptoKeyVersionView { + if x != nil { + return x.View + } + return CryptoKeyVersion_CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED +} + +func (x *ListCryptoKeyVersionsRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListCryptoKeyVersionsRequest) GetOrderBy() string { + if x != nil { + return x.OrderBy + } + return "" +} + +// Request message for +// [KeyManagementService.ListImportJobs][google.cloud.kms.v1.KeyManagementService.ListImportJobs]. +type ListImportJobsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the [KeyRing][google.cloud.kms.v1.KeyRing] + // to list, in the format `projects/*/locations/*/keyRings/*`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. Optional limit on the number of + // [ImportJobs][google.cloud.kms.v1.ImportJob] to include in the response. 
+ // Further [ImportJobs][google.cloud.kms.v1.ImportJob] can subsequently be + // obtained by including the + // [ListImportJobsResponse.next_page_token][google.cloud.kms.v1.ListImportJobsResponse.next_page_token] + // in a subsequent request. If unspecified, the server will pick an + // appropriate default. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional. Optional pagination token, returned earlier via + // [ListImportJobsResponse.next_page_token][google.cloud.kms.v1.ListImportJobsResponse.next_page_token]. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // Optional. Only include resources that match the filter in the response. For + // more information, see + // [Sorting and filtering list + // results](https://cloud.google.com/kms/docs/sorting-and-filtering). + Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` + // Optional. Specify how the results should be sorted. If not specified, the + // results will be sorted in the default order. For more information, see + // [Sorting and filtering list + // results](https://cloud.google.com/kms/docs/sorting-and-filtering). + OrderBy string `protobuf:"bytes,5,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` +} + +func (x *ListImportJobsRequest) Reset() { + *x = ListImportJobsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListImportJobsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListImportJobsRequest) ProtoMessage() {} + +func (x *ListImportJobsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListImportJobsRequest.ProtoReflect.Descriptor instead. +func (*ListImportJobsRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{3} +} + +func (x *ListImportJobsRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListImportJobsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListImportJobsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListImportJobsRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListImportJobsRequest) GetOrderBy() string { + if x != nil { + return x.OrderBy + } + return "" +} + +// Response message for +// [KeyManagementService.ListKeyRings][google.cloud.kms.v1.KeyManagementService.ListKeyRings]. +type ListKeyRingsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of [KeyRings][google.cloud.kms.v1.KeyRing]. + KeyRings []*KeyRing `protobuf:"bytes,1,rep,name=key_rings,json=keyRings,proto3" json:"key_rings,omitempty"` + // A token to retrieve next page of results. Pass this value in + // [ListKeyRingsRequest.page_token][google.cloud.kms.v1.ListKeyRingsRequest.page_token] + // to retrieve the next page of results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of [KeyRings][google.cloud.kms.v1.KeyRing] that matched + // the query. + TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` +} + +func (x *ListKeyRingsResponse) Reset() { + *x = ListKeyRingsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListKeyRingsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListKeyRingsResponse) ProtoMessage() {} + +func (x *ListKeyRingsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListKeyRingsResponse.ProtoReflect.Descriptor instead. +func (*ListKeyRingsResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{4} +} + +func (x *ListKeyRingsResponse) GetKeyRings() []*KeyRing { + if x != nil { + return x.KeyRings + } + return nil +} + +func (x *ListKeyRingsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListKeyRingsResponse) GetTotalSize() int32 { + if x != nil { + return x.TotalSize + } + return 0 +} + +// Response message for +// [KeyManagementService.ListCryptoKeys][google.cloud.kms.v1.KeyManagementService.ListCryptoKeys]. +type ListCryptoKeysResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of [CryptoKeys][google.cloud.kms.v1.CryptoKey]. + CryptoKeys []*CryptoKey `protobuf:"bytes,1,rep,name=crypto_keys,json=cryptoKeys,proto3" json:"crypto_keys,omitempty"` + // A token to retrieve next page of results. Pass this value in + // [ListCryptoKeysRequest.page_token][google.cloud.kms.v1.ListCryptoKeysRequest.page_token] + // to retrieve the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of [CryptoKeys][google.cloud.kms.v1.CryptoKey] that + // matched the query. + TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` +} + +func (x *ListCryptoKeysResponse) Reset() { + *x = ListCryptoKeysResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListCryptoKeysResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListCryptoKeysResponse) ProtoMessage() {} + +func (x *ListCryptoKeysResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListCryptoKeysResponse.ProtoReflect.Descriptor instead. 
+func (*ListCryptoKeysResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{5} +} + +func (x *ListCryptoKeysResponse) GetCryptoKeys() []*CryptoKey { + if x != nil { + return x.CryptoKeys + } + return nil +} + +func (x *ListCryptoKeysResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListCryptoKeysResponse) GetTotalSize() int32 { + if x != nil { + return x.TotalSize + } + return 0 +} + +// Response message for +// [KeyManagementService.ListCryptoKeyVersions][google.cloud.kms.v1.KeyManagementService.ListCryptoKeyVersions]. +type ListCryptoKeyVersionsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion]. + CryptoKeyVersions []*CryptoKeyVersion `protobuf:"bytes,1,rep,name=crypto_key_versions,json=cryptoKeyVersions,proto3" json:"crypto_key_versions,omitempty"` + // A token to retrieve next page of results. Pass this value in + // [ListCryptoKeyVersionsRequest.page_token][google.cloud.kms.v1.ListCryptoKeyVersionsRequest.page_token] + // to retrieve the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of + // [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] that matched the + // query. + TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` +} + +func (x *ListCryptoKeyVersionsResponse) Reset() { + *x = ListCryptoKeyVersionsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListCryptoKeyVersionsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListCryptoKeyVersionsResponse) ProtoMessage() {} + +func (x *ListCryptoKeyVersionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListCryptoKeyVersionsResponse.ProtoReflect.Descriptor instead. +func (*ListCryptoKeyVersionsResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{6} +} + +func (x *ListCryptoKeyVersionsResponse) GetCryptoKeyVersions() []*CryptoKeyVersion { + if x != nil { + return x.CryptoKeyVersions + } + return nil +} + +func (x *ListCryptoKeyVersionsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListCryptoKeyVersionsResponse) GetTotalSize() int32 { + if x != nil { + return x.TotalSize + } + return 0 +} + +// Response message for +// [KeyManagementService.ListImportJobs][google.cloud.kms.v1.KeyManagementService.ListImportJobs]. +type ListImportJobsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of [ImportJobs][google.cloud.kms.v1.ImportJob]. + ImportJobs []*ImportJob `protobuf:"bytes,1,rep,name=import_jobs,json=importJobs,proto3" json:"import_jobs,omitempty"` + // A token to retrieve next page of results. 
Pass this value in + // [ListImportJobsRequest.page_token][google.cloud.kms.v1.ListImportJobsRequest.page_token] + // to retrieve the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of [ImportJobs][google.cloud.kms.v1.ImportJob] that + // matched the query. + TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` +} + +func (x *ListImportJobsResponse) Reset() { + *x = ListImportJobsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListImportJobsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListImportJobsResponse) ProtoMessage() {} + +func (x *ListImportJobsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListImportJobsResponse.ProtoReflect.Descriptor instead. +func (*ListImportJobsResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{7} +} + +func (x *ListImportJobsResponse) GetImportJobs() []*ImportJob { + if x != nil { + return x.ImportJobs + } + return nil +} + +func (x *ListImportJobsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListImportJobsResponse) GetTotalSize() int32 { + if x != nil { + return x.TotalSize + } + return 0 +} + +// Request message for +// [KeyManagementService.GetKeyRing][google.cloud.kms.v1.KeyManagementService.GetKeyRing]. +type GetKeyRingRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.KeyRing.name] of the + // [KeyRing][google.cloud.kms.v1.KeyRing] to get. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetKeyRingRequest) Reset() { + *x = GetKeyRingRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetKeyRingRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetKeyRingRequest) ProtoMessage() {} + +func (x *GetKeyRingRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetKeyRingRequest.ProtoReflect.Descriptor instead. +func (*GetKeyRingRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{8} +} + +func (x *GetKeyRingRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for +// [KeyManagementService.GetCryptoKey][google.cloud.kms.v1.KeyManagementService.GetCryptoKey]. 
+type GetCryptoKeyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.CryptoKey.name] of the + // [CryptoKey][google.cloud.kms.v1.CryptoKey] to get. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetCryptoKeyRequest) Reset() { + *x = GetCryptoKeyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetCryptoKeyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetCryptoKeyRequest) ProtoMessage() {} + +func (x *GetCryptoKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetCryptoKeyRequest.ProtoReflect.Descriptor instead. +func (*GetCryptoKeyRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{9} +} + +func (x *GetCryptoKeyRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for +// [KeyManagementService.GetCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.GetCryptoKeyVersion]. +type GetCryptoKeyVersionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.CryptoKeyVersion.name] of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to get. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetCryptoKeyVersionRequest) Reset() { + *x = GetCryptoKeyVersionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetCryptoKeyVersionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetCryptoKeyVersionRequest) ProtoMessage() {} + +func (x *GetCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetCryptoKeyVersionRequest.ProtoReflect.Descriptor instead. +func (*GetCryptoKeyVersionRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{10} +} + +func (x *GetCryptoKeyVersionRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for +// [KeyManagementService.GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]. +type GetPublicKeyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.CryptoKeyVersion.name] of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] public key to get. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetPublicKeyRequest) Reset() { + *x = GetPublicKeyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetPublicKeyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPublicKeyRequest) ProtoMessage() {} + +func (x *GetPublicKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPublicKeyRequest.ProtoReflect.Descriptor instead. +func (*GetPublicKeyRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{11} +} + +func (x *GetPublicKeyRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for +// [KeyManagementService.GetImportJob][google.cloud.kms.v1.KeyManagementService.GetImportJob]. +type GetImportJobRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.ImportJob.name] of the + // [ImportJob][google.cloud.kms.v1.ImportJob] to get. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetImportJobRequest) Reset() { + *x = GetImportJobRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetImportJobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetImportJobRequest) ProtoMessage() {} + +func (x *GetImportJobRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetImportJobRequest.ProtoReflect.Descriptor instead. +func (*GetImportJobRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{12} +} + +func (x *GetImportJobRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for +// [KeyManagementService.CreateKeyRing][google.cloud.kms.v1.KeyManagementService.CreateKeyRing]. +type CreateKeyRingRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the location associated with the + // [KeyRings][google.cloud.kms.v1.KeyRing], in the format + // `projects/*/locations/*`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. It must be unique within a location and match the regular + // expression `[a-zA-Z0-9_-]{1,63}` + KeyRingId string `protobuf:"bytes,2,opt,name=key_ring_id,json=keyRingId,proto3" json:"key_ring_id,omitempty"` + // Required. A [KeyRing][google.cloud.kms.v1.KeyRing] with initial field + // values. 
+ KeyRing *KeyRing `protobuf:"bytes,3,opt,name=key_ring,json=keyRing,proto3" json:"key_ring,omitempty"` +} + +func (x *CreateKeyRingRequest) Reset() { + *x = CreateKeyRingRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateKeyRingRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateKeyRingRequest) ProtoMessage() {} + +func (x *CreateKeyRingRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateKeyRingRequest.ProtoReflect.Descriptor instead. +func (*CreateKeyRingRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{13} +} + +func (x *CreateKeyRingRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateKeyRingRequest) GetKeyRingId() string { + if x != nil { + return x.KeyRingId + } + return "" +} + +func (x *CreateKeyRingRequest) GetKeyRing() *KeyRing { + if x != nil { + return x.KeyRing + } + return nil +} + +// Request message for +// [KeyManagementService.CreateCryptoKey][google.cloud.kms.v1.KeyManagementService.CreateCryptoKey]. +type CreateCryptoKeyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.KeyRing.name] of the KeyRing + // associated with the [CryptoKeys][google.cloud.kms.v1.CryptoKey]. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. It must be unique within a KeyRing and match the regular + // expression `[a-zA-Z0-9_-]{1,63}` + CryptoKeyId string `protobuf:"bytes,2,opt,name=crypto_key_id,json=cryptoKeyId,proto3" json:"crypto_key_id,omitempty"` + // Required. A [CryptoKey][google.cloud.kms.v1.CryptoKey] with initial field + // values. + CryptoKey *CryptoKey `protobuf:"bytes,3,opt,name=crypto_key,json=cryptoKey,proto3" json:"crypto_key,omitempty"` + // If set to true, the request will create a + // [CryptoKey][google.cloud.kms.v1.CryptoKey] without any + // [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion]. You must + // manually call + // [CreateCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion] + // or + // [ImportCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.ImportCryptoKeyVersion] + // before you can use this [CryptoKey][google.cloud.kms.v1.CryptoKey]. 
+ SkipInitialVersionCreation bool `protobuf:"varint,5,opt,name=skip_initial_version_creation,json=skipInitialVersionCreation,proto3" json:"skip_initial_version_creation,omitempty"` +} + +func (x *CreateCryptoKeyRequest) Reset() { + *x = CreateCryptoKeyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateCryptoKeyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateCryptoKeyRequest) ProtoMessage() {} + +func (x *CreateCryptoKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateCryptoKeyRequest.ProtoReflect.Descriptor instead. +func (*CreateCryptoKeyRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{14} +} + +func (x *CreateCryptoKeyRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateCryptoKeyRequest) GetCryptoKeyId() string { + if x != nil { + return x.CryptoKeyId + } + return "" +} + +func (x *CreateCryptoKeyRequest) GetCryptoKey() *CryptoKey { + if x != nil { + return x.CryptoKey + } + return nil +} + +func (x *CreateCryptoKeyRequest) GetSkipInitialVersionCreation() bool { + if x != nil { + return x.SkipInitialVersionCreation + } + return false +} + +// Request message for +// [KeyManagementService.CreateCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion]. +type CreateCryptoKeyVersionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.CryptoKey.name] of the + // [CryptoKey][google.cloud.kms.v1.CryptoKey] associated with the + // [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion]. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. A [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with + // initial field values. + CryptoKeyVersion *CryptoKeyVersion `protobuf:"bytes,2,opt,name=crypto_key_version,json=cryptoKeyVersion,proto3" json:"crypto_key_version,omitempty"` +} + +func (x *CreateCryptoKeyVersionRequest) Reset() { + *x = CreateCryptoKeyVersionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateCryptoKeyVersionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateCryptoKeyVersionRequest) ProtoMessage() {} + +func (x *CreateCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateCryptoKeyVersionRequest.ProtoReflect.Descriptor instead. 
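+
+ // A construction sketch, not part of the generated API surface: building a
+ // CreateCryptoKeyRequest that skips initial version creation, as described
+ // for skip_initial_version_creation above. CryptoKey,
+ // CryptoKeyVersionTemplate, and the enum values come from resources.pb.go in
+ // this package; the parent and key id are placeholders.
+ //
+ //	req := &CreateCryptoKeyRequest{
+ //		Parent:      "projects/p/locations/global/keyRings/r",
+ //		CryptoKeyId: "imported-key",
+ //		CryptoKey: &CryptoKey{
+ //			Purpose: CryptoKey_ENCRYPT_DECRYPT,
+ //			VersionTemplate: &CryptoKeyVersionTemplate{
+ //				Algorithm: CryptoKeyVersion_GOOGLE_SYMMETRIC_ENCRYPTION,
+ //			},
+ //		},
+ //		// No usable version will exist until CreateCryptoKeyVersion or
+ //		// ImportCryptoKeyVersion is called.
+ //		SkipInitialVersionCreation: true,
+ //	}
+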
+func (*CreateCryptoKeyVersionRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{15} +} + +func (x *CreateCryptoKeyVersionRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateCryptoKeyVersionRequest) GetCryptoKeyVersion() *CryptoKeyVersion { + if x != nil { + return x.CryptoKeyVersion + } + return nil +} + +// Request message for +// [KeyManagementService.ImportCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.ImportCryptoKeyVersion]. +type ImportCryptoKeyVersionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.CryptoKey.name] of the + // [CryptoKey][google.cloud.kms.v1.CryptoKey] to be imported into. + // + // The create permission is only required on this key when creating a new + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. The optional [name][google.cloud.kms.v1.CryptoKeyVersion.name] of + // an existing [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to + // target for an import operation. If this field is not present, a new + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] containing the + // supplied key material is created. + // + // If this field is present, the supplied key material is imported into + // the existing [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. To + // import into an existing + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion], the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] must be a child of + // [ImportCryptoKeyVersionRequest.parent][google.cloud.kms.v1.ImportCryptoKeyVersionRequest.parent], + // have been previously created via [ImportCryptoKeyVersion][], and be in + // [DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED] + // or + // [IMPORT_FAILED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.IMPORT_FAILED] + // state. The key material and algorithm must match the previous + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] exactly if the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] has ever contained + // key material. + CryptoKeyVersion string `protobuf:"bytes,6,opt,name=crypto_key_version,json=cryptoKeyVersion,proto3" json:"crypto_key_version,omitempty"` + // Required. The + // [algorithm][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm] + // of the key being imported. This does not need to match the + // [version_template][google.cloud.kms.v1.CryptoKey.version_template] of the + // [CryptoKey][google.cloud.kms.v1.CryptoKey] this version imports into. + Algorithm CryptoKeyVersion_CryptoKeyVersionAlgorithm `protobuf:"varint,2,opt,name=algorithm,proto3,enum=google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionAlgorithm" json:"algorithm,omitempty"` + // Required. The [name][google.cloud.kms.v1.ImportJob.name] of the + // [ImportJob][google.cloud.kms.v1.ImportJob] that was used to wrap this key + // material. + ImportJob string `protobuf:"bytes,4,opt,name=import_job,json=importJob,proto3" json:"import_job,omitempty"` + // Optional. The wrapped key material to import. + // + // Before wrapping, key material must be formatted. If importing symmetric key + // material, the expected key material format is plain bytes. 
If importing + // asymmetric key material, the expected key material format is PKCS#8-encoded + // DER (the PrivateKeyInfo structure from RFC 5208). + // + // When wrapping with import methods + // ([RSA_OAEP_3072_SHA1_AES_256][google.cloud.kms.v1.ImportJob.ImportMethod.RSA_OAEP_3072_SHA1_AES_256] + // or + // [RSA_OAEP_4096_SHA1_AES_256][google.cloud.kms.v1.ImportJob.ImportMethod.RSA_OAEP_4096_SHA1_AES_256] + // or + // [RSA_OAEP_3072_SHA256_AES_256][google.cloud.kms.v1.ImportJob.ImportMethod.RSA_OAEP_3072_SHA256_AES_256] + // or + // [RSA_OAEP_4096_SHA256_AES_256][google.cloud.kms.v1.ImportJob.ImportMethod.RSA_OAEP_4096_SHA256_AES_256]), + // + // this field must contain the concatenation of: + //
+ //  1. An ephemeral AES-256 wrapping key wrapped with the
+ //     [public_key][google.cloud.kms.v1.ImportJob.public_key] using
+ //     RSAES-OAEP with SHA-1/SHA-256, MGF1 with SHA-1/SHA-256, and an empty
+ //     label.
+ //  2. The formatted key to be imported, wrapped with the ephemeral AES-256
+ //     key using AES-KWP (RFC 5649).
+ // + // This format is the same as the format produced by PKCS#11 mechanism + // CKM_RSA_AES_KEY_WRAP. + // + // When wrapping with import methods + // ([RSA_OAEP_3072_SHA256][google.cloud.kms.v1.ImportJob.ImportMethod.RSA_OAEP_3072_SHA256] + // or + // [RSA_OAEP_4096_SHA256][google.cloud.kms.v1.ImportJob.ImportMethod.RSA_OAEP_4096_SHA256]), + // + // this field must contain the formatted key to be imported, wrapped with the + // [public_key][google.cloud.kms.v1.ImportJob.public_key] using RSAES-OAEP + // with SHA-256, MGF1 with SHA-256, and an empty label. + WrappedKey []byte `protobuf:"bytes,8,opt,name=wrapped_key,json=wrappedKey,proto3" json:"wrapped_key,omitempty"` + // This field is legacy. Use the field + // [wrapped_key][google.cloud.kms.v1.ImportCryptoKeyVersionRequest.wrapped_key] + // instead. + // + // Types that are assignable to WrappedKeyMaterial: + // + // *ImportCryptoKeyVersionRequest_RsaAesWrappedKey + WrappedKeyMaterial isImportCryptoKeyVersionRequest_WrappedKeyMaterial `protobuf_oneof:"wrapped_key_material"` +} + +func (x *ImportCryptoKeyVersionRequest) Reset() { + *x = ImportCryptoKeyVersionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportCryptoKeyVersionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportCryptoKeyVersionRequest) ProtoMessage() {} + +func (x *ImportCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportCryptoKeyVersionRequest.ProtoReflect.Descriptor instead. +func (*ImportCryptoKeyVersionRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{16} +} + +func (x *ImportCryptoKeyVersionRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ImportCryptoKeyVersionRequest) GetCryptoKeyVersion() string { + if x != nil { + return x.CryptoKeyVersion + } + return "" +} + +func (x *ImportCryptoKeyVersionRequest) GetAlgorithm() CryptoKeyVersion_CryptoKeyVersionAlgorithm { + if x != nil { + return x.Algorithm + } + return CryptoKeyVersion_CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED +} + +func (x *ImportCryptoKeyVersionRequest) GetImportJob() string { + if x != nil { + return x.ImportJob + } + return "" +} + +func (x *ImportCryptoKeyVersionRequest) GetWrappedKey() []byte { + if x != nil { + return x.WrappedKey + } + return nil +} + +func (m *ImportCryptoKeyVersionRequest) GetWrappedKeyMaterial() isImportCryptoKeyVersionRequest_WrappedKeyMaterial { + if m != nil { + return m.WrappedKeyMaterial + } + return nil +} + +func (x *ImportCryptoKeyVersionRequest) GetRsaAesWrappedKey() []byte { + if x, ok := x.GetWrappedKeyMaterial().(*ImportCryptoKeyVersionRequest_RsaAesWrappedKey); ok { + return x.RsaAesWrappedKey + } + return nil +} + +type isImportCryptoKeyVersionRequest_WrappedKeyMaterial interface { + isImportCryptoKeyVersionRequest_WrappedKeyMaterial() +} + +type ImportCryptoKeyVersionRequest_RsaAesWrappedKey struct { + // Optional. This field has the same meaning as + // [wrapped_key][google.cloud.kms.v1.ImportCryptoKeyVersionRequest.wrapped_key]. 
+ // Prefer to use that field in new work. Either that field or this field + // (but not both) must be specified. + RsaAesWrappedKey []byte `protobuf:"bytes,5,opt,name=rsa_aes_wrapped_key,json=rsaAesWrappedKey,proto3,oneof"` +} + +func (*ImportCryptoKeyVersionRequest_RsaAesWrappedKey) isImportCryptoKeyVersionRequest_WrappedKeyMaterial() { +} + +// Request message for +// [KeyManagementService.CreateImportJob][google.cloud.kms.v1.KeyManagementService.CreateImportJob]. +type CreateImportJobRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.KeyRing.name] of the + // [KeyRing][google.cloud.kms.v1.KeyRing] associated with the + // [ImportJobs][google.cloud.kms.v1.ImportJob]. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. It must be unique within a KeyRing and match the regular + // expression `[a-zA-Z0-9_-]{1,63}` + ImportJobId string `protobuf:"bytes,2,opt,name=import_job_id,json=importJobId,proto3" json:"import_job_id,omitempty"` + // Required. An [ImportJob][google.cloud.kms.v1.ImportJob] with initial field + // values. + ImportJob *ImportJob `protobuf:"bytes,3,opt,name=import_job,json=importJob,proto3" json:"import_job,omitempty"` +} + +func (x *CreateImportJobRequest) Reset() { + *x = CreateImportJobRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateImportJobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateImportJobRequest) ProtoMessage() {} + +func (x *CreateImportJobRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateImportJobRequest.ProtoReflect.Descriptor instead. +func (*CreateImportJobRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{17} +} + +func (x *CreateImportJobRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateImportJobRequest) GetImportJobId() string { + if x != nil { + return x.ImportJobId + } + return "" +} + +func (x *CreateImportJobRequest) GetImportJob() *ImportJob { + if x != nil { + return x.ImportJob + } + return nil +} + +// Request message for +// [KeyManagementService.UpdateCryptoKey][google.cloud.kms.v1.KeyManagementService.UpdateCryptoKey]. +type UpdateCryptoKeyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. [CryptoKey][google.cloud.kms.v1.CryptoKey] with updated values. + CryptoKey *CryptoKey `protobuf:"bytes,1,opt,name=crypto_key,json=cryptoKey,proto3" json:"crypto_key,omitempty"` + // Required. List of fields to be updated in this request. 
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateCryptoKeyRequest) Reset() { + *x = UpdateCryptoKeyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateCryptoKeyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateCryptoKeyRequest) ProtoMessage() {} + +func (x *UpdateCryptoKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateCryptoKeyRequest.ProtoReflect.Descriptor instead. +func (*UpdateCryptoKeyRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{18} +} + +func (x *UpdateCryptoKeyRequest) GetCryptoKey() *CryptoKey { + if x != nil { + return x.CryptoKey + } + return nil +} + +func (x *UpdateCryptoKeyRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// Request message for +// [KeyManagementService.UpdateCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyVersion]. +type UpdateCryptoKeyVersionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with + // updated values. + CryptoKeyVersion *CryptoKeyVersion `protobuf:"bytes,1,opt,name=crypto_key_version,json=cryptoKeyVersion,proto3" json:"crypto_key_version,omitempty"` + // Required. List of fields to be updated in this request. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateCryptoKeyVersionRequest) Reset() { + *x = UpdateCryptoKeyVersionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateCryptoKeyVersionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateCryptoKeyVersionRequest) ProtoMessage() {} + +func (x *UpdateCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateCryptoKeyVersionRequest.ProtoReflect.Descriptor instead. 
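+
+ // A usage sketch, not part of the generated API surface: only the paths named
+ // in update_mask are modified, so an update that changes nothing but labels
+ // lists that single path. fieldmaskpb is
+ // google.golang.org/protobuf/types/known/fieldmaskpb, which this file already
+ // imports; the resource name and label are placeholders.
+ //
+ //	req := &UpdateCryptoKeyRequest{
+ //		CryptoKey: &CryptoKey{
+ //			Name:   "projects/p/locations/global/keyRings/r/cryptoKeys/k",
+ //			Labels: map[string]string{"team": "tekton"},
+ //		},
+ //		UpdateMask: &fieldmaskpb.FieldMask{Paths: []string{"labels"}},
+ //	}
+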
+func (*UpdateCryptoKeyVersionRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{19} +} + +func (x *UpdateCryptoKeyVersionRequest) GetCryptoKeyVersion() *CryptoKeyVersion { + if x != nil { + return x.CryptoKeyVersion + } + return nil +} + +func (x *UpdateCryptoKeyVersionRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// Request message for +// [KeyManagementService.UpdateCryptoKeyPrimaryVersion][google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyPrimaryVersion]. +type UpdateCryptoKeyPrimaryVersionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the + // [CryptoKey][google.cloud.kms.v1.CryptoKey] to update. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The id of the child + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to use as primary. + CryptoKeyVersionId string `protobuf:"bytes,2,opt,name=crypto_key_version_id,json=cryptoKeyVersionId,proto3" json:"crypto_key_version_id,omitempty"` +} + +func (x *UpdateCryptoKeyPrimaryVersionRequest) Reset() { + *x = UpdateCryptoKeyPrimaryVersionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateCryptoKeyPrimaryVersionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateCryptoKeyPrimaryVersionRequest) ProtoMessage() {} + +func (x *UpdateCryptoKeyPrimaryVersionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateCryptoKeyPrimaryVersionRequest.ProtoReflect.Descriptor instead. +func (*UpdateCryptoKeyPrimaryVersionRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{20} +} + +func (x *UpdateCryptoKeyPrimaryVersionRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UpdateCryptoKeyPrimaryVersionRequest) GetCryptoKeyVersionId() string { + if x != nil { + return x.CryptoKeyVersionId + } + return "" +} + +// Request message for +// [KeyManagementService.DestroyCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion]. +type DestroyCryptoKeyVersionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to destroy. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *DestroyCryptoKeyVersionRequest) Reset() { + *x = DestroyCryptoKeyVersionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DestroyCryptoKeyVersionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DestroyCryptoKeyVersionRequest) ProtoMessage() {} + +func (x *DestroyCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DestroyCryptoKeyVersionRequest.ProtoReflect.Descriptor instead. +func (*DestroyCryptoKeyVersionRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{21} +} + +func (x *DestroyCryptoKeyVersionRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for +// [KeyManagementService.RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion]. +type RestoreCryptoKeyVersionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to restore. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *RestoreCryptoKeyVersionRequest) Reset() { + *x = RestoreCryptoKeyVersionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RestoreCryptoKeyVersionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RestoreCryptoKeyVersionRequest) ProtoMessage() {} + +func (x *RestoreCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RestoreCryptoKeyVersionRequest.ProtoReflect.Descriptor instead. +func (*RestoreCryptoKeyVersionRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{22} +} + +func (x *RestoreCryptoKeyVersionRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for +// [KeyManagementService.Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. +type EncryptRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the + // [CryptoKey][google.cloud.kms.v1.CryptoKey] or + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to use for + // encryption. + // + // If a [CryptoKey][google.cloud.kms.v1.CryptoKey] is specified, the server + // will use its [primary version][google.cloud.kms.v1.CryptoKey.primary]. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The data to encrypt. 
Must be no larger than 64KiB. + // + // The maximum size depends on the key version's + // [protection_level][google.cloud.kms.v1.CryptoKeyVersionTemplate.protection_level]. + // For [SOFTWARE][google.cloud.kms.v1.ProtectionLevel.SOFTWARE], + // [EXTERNAL][google.cloud.kms.v1.ProtectionLevel.EXTERNAL], and + // [EXTERNAL_VPC][google.cloud.kms.v1.ProtectionLevel.EXTERNAL_VPC] keys, the + // plaintext must be no larger than 64KiB. For + // [HSM][google.cloud.kms.v1.ProtectionLevel.HSM] keys, the combined length of + // the plaintext and additional_authenticated_data fields must be no larger + // than 8KiB. + Plaintext []byte `protobuf:"bytes,2,opt,name=plaintext,proto3" json:"plaintext,omitempty"` + // Optional. Optional data that, if specified, must also be provided during + // decryption through + // [DecryptRequest.additional_authenticated_data][google.cloud.kms.v1.DecryptRequest.additional_authenticated_data]. + // + // The maximum size depends on the key version's + // [protection_level][google.cloud.kms.v1.CryptoKeyVersionTemplate.protection_level]. + // For [SOFTWARE][google.cloud.kms.v1.ProtectionLevel.SOFTWARE], + // [EXTERNAL][google.cloud.kms.v1.ProtectionLevel.EXTERNAL], and + // [EXTERNAL_VPC][google.cloud.kms.v1.ProtectionLevel.EXTERNAL_VPC] keys the + // AAD must be no larger than 64KiB. For + // [HSM][google.cloud.kms.v1.ProtectionLevel.HSM] keys, the combined length of + // the plaintext and additional_authenticated_data fields must be no larger + // than 8KiB. + AdditionalAuthenticatedData []byte `protobuf:"bytes,3,opt,name=additional_authenticated_data,json=additionalAuthenticatedData,proto3" json:"additional_authenticated_data,omitempty"` + // Optional. An optional CRC32C checksum of the + // [EncryptRequest.plaintext][google.cloud.kms.v1.EncryptRequest.plaintext]. + // If specified, + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // verify the integrity of the received + // [EncryptRequest.plaintext][google.cloud.kms.v1.EncryptRequest.plaintext] + // using this checksum. + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C([EncryptRequest.plaintext][google.cloud.kms.v1.EncryptRequest.plaintext]) + // is equal to + // [EncryptRequest.plaintext_crc32c][google.cloud.kms.v1.EncryptRequest.plaintext_crc32c], + // and if so, perform a limited number of retries. A persistent mismatch may + // indicate an issue in your computation of the CRC32C checksum. Note: This + // field is defined as int64 for reasons of compatibility across different + // languages. However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that support + // this type. + PlaintextCrc32C *wrapperspb.Int64Value `protobuf:"bytes,7,opt,name=plaintext_crc32c,json=plaintextCrc32c,proto3" json:"plaintext_crc32c,omitempty"` + // Optional. An optional CRC32C checksum of the + // [EncryptRequest.additional_authenticated_data][google.cloud.kms.v1.EncryptRequest.additional_authenticated_data]. + // If specified, + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // verify the integrity of the received + // [EncryptRequest.additional_authenticated_data][google.cloud.kms.v1.EncryptRequest.additional_authenticated_data] + // using this checksum. 
+ // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C([EncryptRequest.additional_authenticated_data][google.cloud.kms.v1.EncryptRequest.additional_authenticated_data]) + // is equal to + // [EncryptRequest.additional_authenticated_data_crc32c][google.cloud.kms.v1.EncryptRequest.additional_authenticated_data_crc32c], + // and if so, perform a limited number of retries. A persistent mismatch may + // indicate an issue in your computation of the CRC32C checksum. Note: This + // field is defined as int64 for reasons of compatibility across different + // languages. However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that support + // this type. + AdditionalAuthenticatedDataCrc32C *wrapperspb.Int64Value `protobuf:"bytes,8,opt,name=additional_authenticated_data_crc32c,json=additionalAuthenticatedDataCrc32c,proto3" json:"additional_authenticated_data_crc32c,omitempty"` +} + +func (x *EncryptRequest) Reset() { + *x = EncryptRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EncryptRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EncryptRequest) ProtoMessage() {} + +func (x *EncryptRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EncryptRequest.ProtoReflect.Descriptor instead. +func (*EncryptRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{23} +} + +func (x *EncryptRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *EncryptRequest) GetPlaintext() []byte { + if x != nil { + return x.Plaintext + } + return nil +} + +func (x *EncryptRequest) GetAdditionalAuthenticatedData() []byte { + if x != nil { + return x.AdditionalAuthenticatedData + } + return nil +} + +func (x *EncryptRequest) GetPlaintextCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.PlaintextCrc32C + } + return nil +} + +func (x *EncryptRequest) GetAdditionalAuthenticatedDataCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.AdditionalAuthenticatedDataCrc32C + } + return nil +} + +// Request message for +// [KeyManagementService.Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt]. +type DecryptRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the + // [CryptoKey][google.cloud.kms.v1.CryptoKey] to use for decryption. The + // server will choose the appropriate version. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The encrypted data originally returned in + // [EncryptResponse.ciphertext][google.cloud.kms.v1.EncryptResponse.ciphertext]. + Ciphertext []byte `protobuf:"bytes,2,opt,name=ciphertext,proto3" json:"ciphertext,omitempty"` + // Optional. 
Optional data that must match the data originally supplied in + // [EncryptRequest.additional_authenticated_data][google.cloud.kms.v1.EncryptRequest.additional_authenticated_data]. + AdditionalAuthenticatedData []byte `protobuf:"bytes,3,opt,name=additional_authenticated_data,json=additionalAuthenticatedData,proto3" json:"additional_authenticated_data,omitempty"` + // Optional. An optional CRC32C checksum of the + // [DecryptRequest.ciphertext][google.cloud.kms.v1.DecryptRequest.ciphertext]. + // If specified, + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // verify the integrity of the received + // [DecryptRequest.ciphertext][google.cloud.kms.v1.DecryptRequest.ciphertext] + // using this checksum. + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C([DecryptRequest.ciphertext][google.cloud.kms.v1.DecryptRequest.ciphertext]) + // is equal to + // [DecryptRequest.ciphertext_crc32c][google.cloud.kms.v1.DecryptRequest.ciphertext_crc32c], + // and if so, perform a limited number of retries. A persistent mismatch may + // indicate an issue in your computation of the CRC32C checksum. Note: This + // field is defined as int64 for reasons of compatibility across different + // languages. However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that support + // this type. + CiphertextCrc32C *wrapperspb.Int64Value `protobuf:"bytes,5,opt,name=ciphertext_crc32c,json=ciphertextCrc32c,proto3" json:"ciphertext_crc32c,omitempty"` + // Optional. An optional CRC32C checksum of the + // [DecryptRequest.additional_authenticated_data][google.cloud.kms.v1.DecryptRequest.additional_authenticated_data]. + // If specified, + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // verify the integrity of the received + // [DecryptRequest.additional_authenticated_data][google.cloud.kms.v1.DecryptRequest.additional_authenticated_data] + // using this checksum. + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C([DecryptRequest.additional_authenticated_data][google.cloud.kms.v1.DecryptRequest.additional_authenticated_data]) + // is equal to + // [DecryptRequest.additional_authenticated_data_crc32c][google.cloud.kms.v1.DecryptRequest.additional_authenticated_data_crc32c], + // and if so, perform a limited number of retries. A persistent mismatch may + // indicate an issue in your computation of the CRC32C checksum. Note: This + // field is defined as int64 for reasons of compatibility across different + // languages. However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that support + // this type. 
+ AdditionalAuthenticatedDataCrc32C *wrapperspb.Int64Value `protobuf:"bytes,6,opt,name=additional_authenticated_data_crc32c,json=additionalAuthenticatedDataCrc32c,proto3" json:"additional_authenticated_data_crc32c,omitempty"` +} + +func (x *DecryptRequest) Reset() { + *x = DecryptRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DecryptRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DecryptRequest) ProtoMessage() {} + +func (x *DecryptRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DecryptRequest.ProtoReflect.Descriptor instead. +func (*DecryptRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{24} +} + +func (x *DecryptRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *DecryptRequest) GetCiphertext() []byte { + if x != nil { + return x.Ciphertext + } + return nil +} + +func (x *DecryptRequest) GetAdditionalAuthenticatedData() []byte { + if x != nil { + return x.AdditionalAuthenticatedData + } + return nil +} + +func (x *DecryptRequest) GetCiphertextCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.CiphertextCrc32C + } + return nil +} + +func (x *DecryptRequest) GetAdditionalAuthenticatedDataCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.AdditionalAuthenticatedDataCrc32C + } + return nil +} + +// Request message for +// [KeyManagementService.AsymmetricSign][google.cloud.kms.v1.KeyManagementService.AsymmetricSign]. +type AsymmetricSignRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to use for + // signing. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Optional. The digest of the data to sign. The digest must be produced with + // the same digest algorithm as specified by the key version's + // [algorithm][google.cloud.kms.v1.CryptoKeyVersion.algorithm]. + // + // This field may not be supplied if + // [AsymmetricSignRequest.data][google.cloud.kms.v1.AsymmetricSignRequest.data] + // is supplied. + Digest *Digest `protobuf:"bytes,3,opt,name=digest,proto3" json:"digest,omitempty"` + // Optional. An optional CRC32C checksum of the + // [AsymmetricSignRequest.digest][google.cloud.kms.v1.AsymmetricSignRequest.digest]. + // If specified, + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // verify the integrity of the received + // [AsymmetricSignRequest.digest][google.cloud.kms.v1.AsymmetricSignRequest.digest] + // using this checksum. + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // report an error if the checksum verification fails. 
If you receive a + // checksum error, your client should verify that + // CRC32C([AsymmetricSignRequest.digest][google.cloud.kms.v1.AsymmetricSignRequest.digest]) + // is equal to + // [AsymmetricSignRequest.digest_crc32c][google.cloud.kms.v1.AsymmetricSignRequest.digest_crc32c], + // and if so, perform a limited number of retries. A persistent mismatch may + // indicate an issue in your computation of the CRC32C checksum. Note: This + // field is defined as int64 for reasons of compatibility across different + // languages. However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that support + // this type. + DigestCrc32C *wrapperspb.Int64Value `protobuf:"bytes,4,opt,name=digest_crc32c,json=digestCrc32c,proto3" json:"digest_crc32c,omitempty"` + // Optional. The data to sign. + // It can't be supplied if + // [AsymmetricSignRequest.digest][google.cloud.kms.v1.AsymmetricSignRequest.digest] + // is supplied. + Data []byte `protobuf:"bytes,6,opt,name=data,proto3" json:"data,omitempty"` + // Optional. An optional CRC32C checksum of the + // [AsymmetricSignRequest.data][google.cloud.kms.v1.AsymmetricSignRequest.data]. + // If specified, + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // verify the integrity of the received + // [AsymmetricSignRequest.data][google.cloud.kms.v1.AsymmetricSignRequest.data] + // using this checksum. + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C([AsymmetricSignRequest.data][google.cloud.kms.v1.AsymmetricSignRequest.data]) + // is equal to + // [AsymmetricSignRequest.data_crc32c][google.cloud.kms.v1.AsymmetricSignRequest.data_crc32c], + // and if so, perform a limited number of retries. A persistent mismatch may + // indicate an issue in your computation of the CRC32C checksum. Note: This + // field is defined as int64 for reasons of compatibility across different + // languages. However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that support + // this type. + DataCrc32C *wrapperspb.Int64Value `protobuf:"bytes,7,opt,name=data_crc32c,json=dataCrc32c,proto3" json:"data_crc32c,omitempty"` +} + +func (x *AsymmetricSignRequest) Reset() { + *x = AsymmetricSignRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AsymmetricSignRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AsymmetricSignRequest) ProtoMessage() {} + +func (x *AsymmetricSignRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AsymmetricSignRequest.ProtoReflect.Descriptor instead. 
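+
+ // A client-side sketch, not part of the generated API surface: populating an
+ // AsymmetricSignRequest for a SHA-256 key version together with the optional
+ // digest_crc32c integrity field described above. crc32.Castagnoli is the
+ // CRC32C polynomial from hash/crc32, and wrapperspb is
+ // google.golang.org/protobuf/types/known/wrapperspb; the key version name is
+ // a placeholder.
+ //
+ //	digest := sha256.Sum256(message)
+ //	crc := crc32.Checksum(digest[:], crc32.MakeTable(crc32.Castagnoli))
+ //	req := &AsymmetricSignRequest{
+ //		Name:   "projects/p/locations/global/keyRings/r/cryptoKeys/k/cryptoKeyVersions/1",
+ //		Digest: &Digest{Digest: &Digest_Sha256{Sha256: digest[:]}},
+ //		// Defined as int64 on the wire, but always non-negative and < 2^32.
+ //		DigestCrc32C: wrapperspb.Int64(int64(crc)),
+ //	}
+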
+func (*AsymmetricSignRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{25} +} + +func (x *AsymmetricSignRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *AsymmetricSignRequest) GetDigest() *Digest { + if x != nil { + return x.Digest + } + return nil +} + +func (x *AsymmetricSignRequest) GetDigestCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.DigestCrc32C + } + return nil +} + +func (x *AsymmetricSignRequest) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *AsymmetricSignRequest) GetDataCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.DataCrc32C + } + return nil +} + +// Request message for +// [KeyManagementService.AsymmetricDecrypt][google.cloud.kms.v1.KeyManagementService.AsymmetricDecrypt]. +type AsymmetricDecryptRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to use for + // decryption. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The data encrypted with the named + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]'s public key using + // OAEP. + Ciphertext []byte `protobuf:"bytes,3,opt,name=ciphertext,proto3" json:"ciphertext,omitempty"` + // Optional. An optional CRC32C checksum of the + // [AsymmetricDecryptRequest.ciphertext][google.cloud.kms.v1.AsymmetricDecryptRequest.ciphertext]. + // If specified, + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // verify the integrity of the received + // [AsymmetricDecryptRequest.ciphertext][google.cloud.kms.v1.AsymmetricDecryptRequest.ciphertext] + // using this checksum. + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C([AsymmetricDecryptRequest.ciphertext][google.cloud.kms.v1.AsymmetricDecryptRequest.ciphertext]) + // is equal to + // [AsymmetricDecryptRequest.ciphertext_crc32c][google.cloud.kms.v1.AsymmetricDecryptRequest.ciphertext_crc32c], + // and if so, perform a limited number of retries. A persistent mismatch may + // indicate an issue in your computation of the CRC32C checksum. Note: This + // field is defined as int64 for reasons of compatibility across different + // languages. However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that support + // this type. 
+ CiphertextCrc32C *wrapperspb.Int64Value `protobuf:"bytes,4,opt,name=ciphertext_crc32c,json=ciphertextCrc32c,proto3" json:"ciphertext_crc32c,omitempty"` +} + +func (x *AsymmetricDecryptRequest) Reset() { + *x = AsymmetricDecryptRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AsymmetricDecryptRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AsymmetricDecryptRequest) ProtoMessage() {} + +func (x *AsymmetricDecryptRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AsymmetricDecryptRequest.ProtoReflect.Descriptor instead. +func (*AsymmetricDecryptRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{26} +} + +func (x *AsymmetricDecryptRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *AsymmetricDecryptRequest) GetCiphertext() []byte { + if x != nil { + return x.Ciphertext + } + return nil +} + +func (x *AsymmetricDecryptRequest) GetCiphertextCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.CiphertextCrc32C + } + return nil +} + +// Request message for +// [KeyManagementService.MacSign][google.cloud.kms.v1.KeyManagementService.MacSign]. +type MacSignRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to use for + // signing. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The data to sign. The MAC tag is computed over this data field + // based on the specific algorithm. + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + // Optional. An optional CRC32C checksum of the + // [MacSignRequest.data][google.cloud.kms.v1.MacSignRequest.data]. If + // specified, [KeyManagementService][google.cloud.kms.v1.KeyManagementService] + // will verify the integrity of the received + // [MacSignRequest.data][google.cloud.kms.v1.MacSignRequest.data] using this + // checksum. [KeyManagementService][google.cloud.kms.v1.KeyManagementService] + // will report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C([MacSignRequest.data][google.cloud.kms.v1.MacSignRequest.data]) is + // equal to + // [MacSignRequest.data_crc32c][google.cloud.kms.v1.MacSignRequest.data_crc32c], + // and if so, perform a limited number of retries. A persistent mismatch may + // indicate an issue in your computation of the CRC32C checksum. Note: This + // field is defined as int64 for reasons of compatibility across different + // languages. However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that support + // this type. 
+ DataCrc32C *wrapperspb.Int64Value `protobuf:"bytes,3,opt,name=data_crc32c,json=dataCrc32c,proto3" json:"data_crc32c,omitempty"` +} + +func (x *MacSignRequest) Reset() { + *x = MacSignRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MacSignRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MacSignRequest) ProtoMessage() {} + +func (x *MacSignRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MacSignRequest.ProtoReflect.Descriptor instead. +func (*MacSignRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{27} +} + +func (x *MacSignRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *MacSignRequest) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *MacSignRequest) GetDataCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.DataCrc32C + } + return nil +} + +// Request message for +// [KeyManagementService.MacVerify][google.cloud.kms.v1.KeyManagementService.MacVerify]. +type MacVerifyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to use for + // verification. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The data used previously as a + // [MacSignRequest.data][google.cloud.kms.v1.MacSignRequest.data] to generate + // the MAC tag. + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + // Optional. An optional CRC32C checksum of the + // [MacVerifyRequest.data][google.cloud.kms.v1.MacVerifyRequest.data]. If + // specified, [KeyManagementService][google.cloud.kms.v1.KeyManagementService] + // will verify the integrity of the received + // [MacVerifyRequest.data][google.cloud.kms.v1.MacVerifyRequest.data] using + // this checksum. + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C([MacVerifyRequest.data][google.cloud.kms.v1.MacVerifyRequest.data]) + // is equal to + // [MacVerifyRequest.data_crc32c][google.cloud.kms.v1.MacVerifyRequest.data_crc32c], + // and if so, perform a limited number of retries. A persistent mismatch may + // indicate an issue in your computation of the CRC32C checksum. Note: This + // field is defined as int64 for reasons of compatibility across different + // languages. However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that support + // this type. + DataCrc32C *wrapperspb.Int64Value `protobuf:"bytes,3,opt,name=data_crc32c,json=dataCrc32c,proto3" json:"data_crc32c,omitempty"` + // Required. The signature to verify. + Mac []byte `protobuf:"bytes,4,opt,name=mac,proto3" json:"mac,omitempty"` + // Optional. 
An optional CRC32C checksum of the
+ // [MacVerifyRequest.mac][google.cloud.kms.v1.MacVerifyRequest.mac]. If
+ // specified, [KeyManagementService][google.cloud.kms.v1.KeyManagementService]
+ // will verify the integrity of the received
+ // [MacVerifyRequest.mac][google.cloud.kms.v1.MacVerifyRequest.mac] using this
+ // checksum. [KeyManagementService][google.cloud.kms.v1.KeyManagementService]
+ // will report an error if the checksum verification fails. If you receive a
+ // checksum error, your client should verify that
+ // CRC32C([MacVerifyRequest.mac][google.cloud.kms.v1.MacVerifyRequest.mac]) is
+ // equal to
+ // [MacVerifyRequest.mac_crc32c][google.cloud.kms.v1.MacVerifyRequest.mac_crc32c],
+ // and if so, perform a limited number of retries. A persistent mismatch may
+ // indicate an issue in your computation of the CRC32C checksum. Note: This
+ // field is defined as int64 for reasons of compatibility across different
+ // languages. However, it is a non-negative integer, which will never exceed
+ // 2^32-1, and can be safely downconverted to uint32 in languages that support
+ // this type.
+ MacCrc32C *wrapperspb.Int64Value `protobuf:"bytes,5,opt,name=mac_crc32c,json=macCrc32c,proto3" json:"mac_crc32c,omitempty"`
+}
+
+func (x *MacVerifyRequest) Reset() {
+ *x = MacVerifyRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MacVerifyRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MacVerifyRequest) ProtoMessage() {}
+
+func (x *MacVerifyRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[28]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MacVerifyRequest.ProtoReflect.Descriptor instead.
+func (*MacVerifyRequest) Descriptor() ([]byte, []int) {
+ return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{28}
+}
+
+func (x *MacVerifyRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *MacVerifyRequest) GetData() []byte {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+func (x *MacVerifyRequest) GetDataCrc32C() *wrapperspb.Int64Value {
+ if x != nil {
+ return x.DataCrc32C
+ }
+ return nil
+}
+
+func (x *MacVerifyRequest) GetMac() []byte {
+ if x != nil {
+ return x.Mac
+ }
+ return nil
+}
+
+func (x *MacVerifyRequest) GetMacCrc32C() *wrapperspb.Int64Value {
+ if x != nil {
+ return x.MacCrc32C
+ }
+ return nil
+}
+
+// Request message for
+// [KeyManagementService.GenerateRandomBytes][google.cloud.kms.v1.KeyManagementService.GenerateRandomBytes].
+type GenerateRandomBytesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The project-specific location in which to generate random bytes.
+ // For example, "projects/my-project/locations/us-central1".
+ Location string `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"`
+ // The length in bytes of the amount of randomness to retrieve. Minimum 8
+ // bytes, maximum 1024 bytes.
+ LengthBytes int32 `protobuf:"varint,2,opt,name=length_bytes,json=lengthBytes,proto3" json:"length_bytes,omitempty"`
+ // The [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] to use when
+ // generating the random data.
Currently, only + // [HSM][google.cloud.kms.v1.ProtectionLevel.HSM] protection level is + // supported. + ProtectionLevel ProtectionLevel `protobuf:"varint,3,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` +} + +func (x *GenerateRandomBytesRequest) Reset() { + *x = GenerateRandomBytesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GenerateRandomBytesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GenerateRandomBytesRequest) ProtoMessage() {} + +func (x *GenerateRandomBytesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GenerateRandomBytesRequest.ProtoReflect.Descriptor instead. +func (*GenerateRandomBytesRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{29} +} + +func (x *GenerateRandomBytesRequest) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *GenerateRandomBytesRequest) GetLengthBytes() int32 { + if x != nil { + return x.LengthBytes + } + return 0 +} + +func (x *GenerateRandomBytesRequest) GetProtectionLevel() ProtectionLevel { + if x != nil { + return x.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +// Response message for +// [KeyManagementService.Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. +type EncryptResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource name of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used in + // encryption. Check this field to verify that the intended resource was used + // for encryption. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The encrypted data. + Ciphertext []byte `protobuf:"bytes,2,opt,name=ciphertext,proto3" json:"ciphertext,omitempty"` + // Integrity verification field. A CRC32C checksum of the returned + // [EncryptResponse.ciphertext][google.cloud.kms.v1.EncryptResponse.ciphertext]. + // An integrity check of + // [EncryptResponse.ciphertext][google.cloud.kms.v1.EncryptResponse.ciphertext] + // can be performed by computing the CRC32C checksum of + // [EncryptResponse.ciphertext][google.cloud.kms.v1.EncryptResponse.ciphertext] + // and comparing your results to this field. Discard the response in case of + // non-matching checksum values, and perform a limited number of retries. A + // persistent mismatch may indicate an issue in your computation of the CRC32C + // checksum. Note: This field is defined as int64 for reasons of compatibility + // across different languages. However, it is a non-negative integer, which + // will never exceed 2^32-1, and can be safely downconverted to uint32 in + // languages that support this type. + CiphertextCrc32C *wrapperspb.Int64Value `protobuf:"bytes,4,opt,name=ciphertext_crc32c,json=ciphertextCrc32c,proto3" json:"ciphertext_crc32c,omitempty"` + // Integrity verification field. 
A flag indicating whether + // [EncryptRequest.plaintext_crc32c][google.cloud.kms.v1.EncryptRequest.plaintext_crc32c] + // was received by + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] and used + // for the integrity verification of the + // [plaintext][google.cloud.kms.v1.EncryptRequest.plaintext]. A false value of + // this field indicates either that + // [EncryptRequest.plaintext_crc32c][google.cloud.kms.v1.EncryptRequest.plaintext_crc32c] + // was left unset or that it was not delivered to + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService]. If you've + // set + // [EncryptRequest.plaintext_crc32c][google.cloud.kms.v1.EncryptRequest.plaintext_crc32c] + // but this field is still false, discard the response and perform a limited + // number of retries. + VerifiedPlaintextCrc32C bool `protobuf:"varint,5,opt,name=verified_plaintext_crc32c,json=verifiedPlaintextCrc32c,proto3" json:"verified_plaintext_crc32c,omitempty"` + // Integrity verification field. A flag indicating whether + // [EncryptRequest.additional_authenticated_data_crc32c][google.cloud.kms.v1.EncryptRequest.additional_authenticated_data_crc32c] + // was received by + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] and used + // for the integrity verification of the + // [AAD][google.cloud.kms.v1.EncryptRequest.additional_authenticated_data]. A + // false value of this field indicates either that + // [EncryptRequest.additional_authenticated_data_crc32c][google.cloud.kms.v1.EncryptRequest.additional_authenticated_data_crc32c] + // was left unset or that it was not delivered to + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService]. If you've + // set + // [EncryptRequest.additional_authenticated_data_crc32c][google.cloud.kms.v1.EncryptRequest.additional_authenticated_data_crc32c] + // but this field is still false, discard the response and perform a limited + // number of retries. + VerifiedAdditionalAuthenticatedDataCrc32C bool `protobuf:"varint,6,opt,name=verified_additional_authenticated_data_crc32c,json=verifiedAdditionalAuthenticatedDataCrc32c,proto3" json:"verified_additional_authenticated_data_crc32c,omitempty"` + // The [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used in + // encryption. + ProtectionLevel ProtectionLevel `protobuf:"varint,7,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` +} + +func (x *EncryptResponse) Reset() { + *x = EncryptResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EncryptResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EncryptResponse) ProtoMessage() {} + +func (x *EncryptResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EncryptResponse.ProtoReflect.Descriptor instead. 
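+//
+// Illustrative note (not part of the generated API): the field comments on
+// [EncryptResponse.ciphertext_crc32c][google.cloud.kms.v1.EncryptResponse.ciphertext_crc32c]
+// describe a client-side integrity check. A minimal sketch of that check,
+// assuming Go's hash/crc32 package with the Castagnoli polynomial, is:
+//
+//	sum := crc32.Checksum(resp.GetCiphertext(), crc32.MakeTable(crc32.Castagnoli))
+//	if int64(sum) != resp.GetCiphertextCrc32C().GetValue() {
+//		// Non-matching checksum: discard the response and perform a
+//		// limited number of retries before surfacing an error.
+//	}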
+func (*EncryptResponse) Descriptor() ([]byte, []int) {
+	return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{30}
+}
+
+func (x *EncryptResponse) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *EncryptResponse) GetCiphertext() []byte {
+	if x != nil {
+		return x.Ciphertext
+	}
+	return nil
+}
+
+func (x *EncryptResponse) GetCiphertextCrc32C() *wrapperspb.Int64Value {
+	if x != nil {
+		return x.CiphertextCrc32C
+	}
+	return nil
+}
+
+func (x *EncryptResponse) GetVerifiedPlaintextCrc32C() bool {
+	if x != nil {
+		return x.VerifiedPlaintextCrc32C
+	}
+	return false
+}
+
+func (x *EncryptResponse) GetVerifiedAdditionalAuthenticatedDataCrc32C() bool {
+	if x != nil {
+		return x.VerifiedAdditionalAuthenticatedDataCrc32C
+	}
+	return false
+}
+
+func (x *EncryptResponse) GetProtectionLevel() ProtectionLevel {
+	if x != nil {
+		return x.ProtectionLevel
+	}
+	return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED
+}
+
+// Response message for
+// [KeyManagementService.Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt].
+type DecryptResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The decrypted data originally supplied in
+	// [EncryptRequest.plaintext][google.cloud.kms.v1.EncryptRequest.plaintext].
+	Plaintext []byte `protobuf:"bytes,1,opt,name=plaintext,proto3" json:"plaintext,omitempty"`
+	// Integrity verification field. A CRC32C checksum of the returned
+	// [DecryptResponse.plaintext][google.cloud.kms.v1.DecryptResponse.plaintext].
+	// An integrity check of
+	// [DecryptResponse.plaintext][google.cloud.kms.v1.DecryptResponse.plaintext]
+	// can be performed by computing the CRC32C checksum of
+	// [DecryptResponse.plaintext][google.cloud.kms.v1.DecryptResponse.plaintext]
+	// and comparing your results to this field. Discard the response in case of
+	// non-matching checksum values, and perform a limited number of retries. A
+	// persistent mismatch may indicate an issue in your computation of the CRC32C
+	// checksum. Note: receiving this response message indicates that
+	// [KeyManagementService][google.cloud.kms.v1.KeyManagementService] is able to
+	// successfully decrypt the
+	// [ciphertext][google.cloud.kms.v1.DecryptRequest.ciphertext]. Note: This
+	// field is defined as int64 for reasons of compatibility across different
+	// languages. However, it is a non-negative integer, which will never exceed
+	// 2^32-1, and can be safely downconverted to uint32 in languages that support
+	// this type.
+	PlaintextCrc32C *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=plaintext_crc32c,json=plaintextCrc32c,proto3" json:"plaintext_crc32c,omitempty"`
+	// Whether the decryption was performed using the primary key version.
+	UsedPrimary bool `protobuf:"varint,3,opt,name=used_primary,json=usedPrimary,proto3" json:"used_primary,omitempty"`
+	// The [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of the
+	// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used in
+	// decryption. 
+ ProtectionLevel ProtectionLevel `protobuf:"varint,4,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` +} + +func (x *DecryptResponse) Reset() { + *x = DecryptResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DecryptResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DecryptResponse) ProtoMessage() {} + +func (x *DecryptResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DecryptResponse.ProtoReflect.Descriptor instead. +func (*DecryptResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{31} +} + +func (x *DecryptResponse) GetPlaintext() []byte { + if x != nil { + return x.Plaintext + } + return nil +} + +func (x *DecryptResponse) GetPlaintextCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.PlaintextCrc32C + } + return nil +} + +func (x *DecryptResponse) GetUsedPrimary() bool { + if x != nil { + return x.UsedPrimary + } + return false +} + +func (x *DecryptResponse) GetProtectionLevel() ProtectionLevel { + if x != nil { + return x.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +// Response message for +// [KeyManagementService.AsymmetricSign][google.cloud.kms.v1.KeyManagementService.AsymmetricSign]. +type AsymmetricSignResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The created signature. + Signature []byte `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"` + // Integrity verification field. A CRC32C checksum of the returned + // [AsymmetricSignResponse.signature][google.cloud.kms.v1.AsymmetricSignResponse.signature]. + // An integrity check of + // [AsymmetricSignResponse.signature][google.cloud.kms.v1.AsymmetricSignResponse.signature] + // can be performed by computing the CRC32C checksum of + // [AsymmetricSignResponse.signature][google.cloud.kms.v1.AsymmetricSignResponse.signature] + // and comparing your results to this field. Discard the response in case of + // non-matching checksum values, and perform a limited number of retries. A + // persistent mismatch may indicate an issue in your computation of the CRC32C + // checksum. Note: This field is defined as int64 for reasons of compatibility + // across different languages. However, it is a non-negative integer, which + // will never exceed 2^32-1, and can be safely downconverted to uint32 in + // languages that support this type. + SignatureCrc32C *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=signature_crc32c,json=signatureCrc32c,proto3" json:"signature_crc32c,omitempty"` + // Integrity verification field. A flag indicating whether + // [AsymmetricSignRequest.digest_crc32c][google.cloud.kms.v1.AsymmetricSignRequest.digest_crc32c] + // was received by + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] and used + // for the integrity verification of the + // [digest][google.cloud.kms.v1.AsymmetricSignRequest.digest]. 
A false value + // of this field indicates either that + // [AsymmetricSignRequest.digest_crc32c][google.cloud.kms.v1.AsymmetricSignRequest.digest_crc32c] + // was left unset or that it was not delivered to + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService]. If you've + // set + // [AsymmetricSignRequest.digest_crc32c][google.cloud.kms.v1.AsymmetricSignRequest.digest_crc32c] + // but this field is still false, discard the response and perform a limited + // number of retries. + VerifiedDigestCrc32C bool `protobuf:"varint,3,opt,name=verified_digest_crc32c,json=verifiedDigestCrc32c,proto3" json:"verified_digest_crc32c,omitempty"` + // The resource name of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used for signing. + // Check this field to verify that the intended resource was used for signing. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // Integrity verification field. A flag indicating whether + // [AsymmetricSignRequest.data_crc32c][google.cloud.kms.v1.AsymmetricSignRequest.data_crc32c] + // was received by + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] and used + // for the integrity verification of the + // [data][google.cloud.kms.v1.AsymmetricSignRequest.data]. A false value of + // this field indicates either that + // [AsymmetricSignRequest.data_crc32c][google.cloud.kms.v1.AsymmetricSignRequest.data_crc32c] + // was left unset or that it was not delivered to + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService]. If you've + // set + // [AsymmetricSignRequest.data_crc32c][google.cloud.kms.v1.AsymmetricSignRequest.data_crc32c] + // but this field is still false, discard the response and perform a limited + // number of retries. + VerifiedDataCrc32C bool `protobuf:"varint,5,opt,name=verified_data_crc32c,json=verifiedDataCrc32c,proto3" json:"verified_data_crc32c,omitempty"` + // The [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used for signing. + ProtectionLevel ProtectionLevel `protobuf:"varint,6,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` +} + +func (x *AsymmetricSignResponse) Reset() { + *x = AsymmetricSignResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AsymmetricSignResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AsymmetricSignResponse) ProtoMessage() {} + +func (x *AsymmetricSignResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AsymmetricSignResponse.ProtoReflect.Descriptor instead. 
+func (*AsymmetricSignResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{32} +} + +func (x *AsymmetricSignResponse) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +func (x *AsymmetricSignResponse) GetSignatureCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.SignatureCrc32C + } + return nil +} + +func (x *AsymmetricSignResponse) GetVerifiedDigestCrc32C() bool { + if x != nil { + return x.VerifiedDigestCrc32C + } + return false +} + +func (x *AsymmetricSignResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *AsymmetricSignResponse) GetVerifiedDataCrc32C() bool { + if x != nil { + return x.VerifiedDataCrc32C + } + return false +} + +func (x *AsymmetricSignResponse) GetProtectionLevel() ProtectionLevel { + if x != nil { + return x.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +// Response message for +// [KeyManagementService.AsymmetricDecrypt][google.cloud.kms.v1.KeyManagementService.AsymmetricDecrypt]. +type AsymmetricDecryptResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The decrypted data originally encrypted with the matching public key. + Plaintext []byte `protobuf:"bytes,1,opt,name=plaintext,proto3" json:"plaintext,omitempty"` + // Integrity verification field. A CRC32C checksum of the returned + // [AsymmetricDecryptResponse.plaintext][google.cloud.kms.v1.AsymmetricDecryptResponse.plaintext]. + // An integrity check of + // [AsymmetricDecryptResponse.plaintext][google.cloud.kms.v1.AsymmetricDecryptResponse.plaintext] + // can be performed by computing the CRC32C checksum of + // [AsymmetricDecryptResponse.plaintext][google.cloud.kms.v1.AsymmetricDecryptResponse.plaintext] + // and comparing your results to this field. Discard the response in case of + // non-matching checksum values, and perform a limited number of retries. A + // persistent mismatch may indicate an issue in your computation of the CRC32C + // checksum. Note: This field is defined as int64 for reasons of compatibility + // across different languages. However, it is a non-negative integer, which + // will never exceed 2^32-1, and can be safely downconverted to uint32 in + // languages that support this type. + PlaintextCrc32C *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=plaintext_crc32c,json=plaintextCrc32c,proto3" json:"plaintext_crc32c,omitempty"` + // Integrity verification field. A flag indicating whether + // [AsymmetricDecryptRequest.ciphertext_crc32c][google.cloud.kms.v1.AsymmetricDecryptRequest.ciphertext_crc32c] + // was received by + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] and used + // for the integrity verification of the + // [ciphertext][google.cloud.kms.v1.AsymmetricDecryptRequest.ciphertext]. A + // false value of this field indicates either that + // [AsymmetricDecryptRequest.ciphertext_crc32c][google.cloud.kms.v1.AsymmetricDecryptRequest.ciphertext_crc32c] + // was left unset or that it was not delivered to + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService]. If you've + // set + // [AsymmetricDecryptRequest.ciphertext_crc32c][google.cloud.kms.v1.AsymmetricDecryptRequest.ciphertext_crc32c] + // but this field is still false, discard the response and perform a limited + // number of retries. 
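+	//
+	// Illustrative note (not part of the generated API): a caller that set
+	// [AsymmetricDecryptRequest.ciphertext_crc32c][google.cloud.kms.v1.AsymmetricDecryptRequest.ciphertext_crc32c]
+	// might combine this flag with the plaintext checksum described above,
+	// assuming Go's hash/crc32 package with the Castagnoli polynomial:
+	//
+	//	if !resp.GetVerifiedCiphertextCrc32C() {
+	//		// The request checksum was not used; discard and retry.
+	//	}
+	//	sum := crc32.Checksum(resp.GetPlaintext(), crc32.MakeTable(crc32.Castagnoli))
+	//	if int64(sum) != resp.GetPlaintextCrc32C().GetValue() {
+	//		// Response failed the integrity check; discard and retry.
+	//	}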
+ VerifiedCiphertextCrc32C bool `protobuf:"varint,3,opt,name=verified_ciphertext_crc32c,json=verifiedCiphertextCrc32c,proto3" json:"verified_ciphertext_crc32c,omitempty"` + // The [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used in + // decryption. + ProtectionLevel ProtectionLevel `protobuf:"varint,4,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` +} + +func (x *AsymmetricDecryptResponse) Reset() { + *x = AsymmetricDecryptResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AsymmetricDecryptResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AsymmetricDecryptResponse) ProtoMessage() {} + +func (x *AsymmetricDecryptResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AsymmetricDecryptResponse.ProtoReflect.Descriptor instead. +func (*AsymmetricDecryptResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{33} +} + +func (x *AsymmetricDecryptResponse) GetPlaintext() []byte { + if x != nil { + return x.Plaintext + } + return nil +} + +func (x *AsymmetricDecryptResponse) GetPlaintextCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.PlaintextCrc32C + } + return nil +} + +func (x *AsymmetricDecryptResponse) GetVerifiedCiphertextCrc32C() bool { + if x != nil { + return x.VerifiedCiphertextCrc32C + } + return false +} + +func (x *AsymmetricDecryptResponse) GetProtectionLevel() ProtectionLevel { + if x != nil { + return x.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +// Response message for +// [KeyManagementService.MacSign][google.cloud.kms.v1.KeyManagementService.MacSign]. +type MacSignResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource name of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used for signing. + // Check this field to verify that the intended resource was used for signing. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The created signature. + Mac []byte `protobuf:"bytes,2,opt,name=mac,proto3" json:"mac,omitempty"` + // Integrity verification field. A CRC32C checksum of the returned + // [MacSignResponse.mac][google.cloud.kms.v1.MacSignResponse.mac]. An + // integrity check of + // [MacSignResponse.mac][google.cloud.kms.v1.MacSignResponse.mac] can be + // performed by computing the CRC32C checksum of + // [MacSignResponse.mac][google.cloud.kms.v1.MacSignResponse.mac] and + // comparing your results to this field. Discard the response in case of + // non-matching checksum values, and perform a limited number of retries. A + // persistent mismatch may indicate an issue in your computation of the CRC32C + // checksum. Note: This field is defined as int64 for reasons of compatibility + // across different languages. 
However, it is a non-negative integer, which + // will never exceed 2^32-1, and can be safely downconverted to uint32 in + // languages that support this type. + MacCrc32C *wrapperspb.Int64Value `protobuf:"bytes,3,opt,name=mac_crc32c,json=macCrc32c,proto3" json:"mac_crc32c,omitempty"` + // Integrity verification field. A flag indicating whether + // [MacSignRequest.data_crc32c][google.cloud.kms.v1.MacSignRequest.data_crc32c] + // was received by + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] and used + // for the integrity verification of the + // [data][google.cloud.kms.v1.MacSignRequest.data]. A false value of this + // field indicates either that + // [MacSignRequest.data_crc32c][google.cloud.kms.v1.MacSignRequest.data_crc32c] + // was left unset or that it was not delivered to + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService]. If you've + // set + // [MacSignRequest.data_crc32c][google.cloud.kms.v1.MacSignRequest.data_crc32c] + // but this field is still false, discard the response and perform a limited + // number of retries. + VerifiedDataCrc32C bool `protobuf:"varint,4,opt,name=verified_data_crc32c,json=verifiedDataCrc32c,proto3" json:"verified_data_crc32c,omitempty"` + // The [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used for signing. + ProtectionLevel ProtectionLevel `protobuf:"varint,5,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` +} + +func (x *MacSignResponse) Reset() { + *x = MacSignResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MacSignResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MacSignResponse) ProtoMessage() {} + +func (x *MacSignResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MacSignResponse.ProtoReflect.Descriptor instead. +func (*MacSignResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{34} +} + +func (x *MacSignResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *MacSignResponse) GetMac() []byte { + if x != nil { + return x.Mac + } + return nil +} + +func (x *MacSignResponse) GetMacCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.MacCrc32C + } + return nil +} + +func (x *MacSignResponse) GetVerifiedDataCrc32C() bool { + if x != nil { + return x.VerifiedDataCrc32C + } + return false +} + +func (x *MacSignResponse) GetProtectionLevel() ProtectionLevel { + if x != nil { + return x.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +// Response message for +// [KeyManagementService.MacVerify][google.cloud.kms.v1.KeyManagementService.MacVerify]. +type MacVerifyResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource name of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used for + // verification. 
Check this field to verify that the intended resource was
+	// used for verification.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// This field indicates whether or not the verification operation for
+	// [MacVerifyRequest.mac][google.cloud.kms.v1.MacVerifyRequest.mac] over
+	// [MacVerifyRequest.data][google.cloud.kms.v1.MacVerifyRequest.data] was
+	// successful.
+	Success bool `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"`
+	// Integrity verification field. A flag indicating whether
+	// [MacVerifyRequest.data_crc32c][google.cloud.kms.v1.MacVerifyRequest.data_crc32c]
+	// was received by
+	// [KeyManagementService][google.cloud.kms.v1.KeyManagementService] and used
+	// for the integrity verification of the
+	// [data][google.cloud.kms.v1.MacVerifyRequest.data]. A false value of this
+	// field indicates either that
+	// [MacVerifyRequest.data_crc32c][google.cloud.kms.v1.MacVerifyRequest.data_crc32c]
+	// was left unset or that it was not delivered to
+	// [KeyManagementService][google.cloud.kms.v1.KeyManagementService]. If you've
+	// set
+	// [MacVerifyRequest.data_crc32c][google.cloud.kms.v1.MacVerifyRequest.data_crc32c]
+	// but this field is still false, discard the response and perform a limited
+	// number of retries.
+	VerifiedDataCrc32C bool `protobuf:"varint,3,opt,name=verified_data_crc32c,json=verifiedDataCrc32c,proto3" json:"verified_data_crc32c,omitempty"`
+	// Integrity verification field. A flag indicating whether
+	// [MacVerifyRequest.mac_crc32c][google.cloud.kms.v1.MacVerifyRequest.mac_crc32c]
+	// was received by
+	// [KeyManagementService][google.cloud.kms.v1.KeyManagementService] and used
+	// for the integrity verification of the
+	// [mac][google.cloud.kms.v1.MacVerifyRequest.mac]. A false value of this
+	// field indicates either that
+	// [MacVerifyRequest.mac_crc32c][google.cloud.kms.v1.MacVerifyRequest.mac_crc32c]
+	// was left unset or that it was not delivered to
+	// [KeyManagementService][google.cloud.kms.v1.KeyManagementService]. If you've
+	// set
+	// [MacVerifyRequest.mac_crc32c][google.cloud.kms.v1.MacVerifyRequest.mac_crc32c]
+	// but this field is still false, discard the response and perform a limited
+	// number of retries.
+	VerifiedMacCrc32C bool `protobuf:"varint,4,opt,name=verified_mac_crc32c,json=verifiedMacCrc32c,proto3" json:"verified_mac_crc32c,omitempty"`
+	// Integrity verification field. This value is used for the integrity
+	// verification of [MacVerifyResponse.success]. If the value of this field
+	// contradicts the value of [MacVerifyResponse.success], discard the response
+	// and perform a limited number of retries.
+	VerifiedSuccessIntegrity bool `protobuf:"varint,5,opt,name=verified_success_integrity,json=verifiedSuccessIntegrity,proto3" json:"verified_success_integrity,omitempty"`
+	// The [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of the
+	// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used for
+	// verification. 
+ ProtectionLevel ProtectionLevel `protobuf:"varint,6,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` +} + +func (x *MacVerifyResponse) Reset() { + *x = MacVerifyResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MacVerifyResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MacVerifyResponse) ProtoMessage() {} + +func (x *MacVerifyResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MacVerifyResponse.ProtoReflect.Descriptor instead. +func (*MacVerifyResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{35} +} + +func (x *MacVerifyResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *MacVerifyResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *MacVerifyResponse) GetVerifiedDataCrc32C() bool { + if x != nil { + return x.VerifiedDataCrc32C + } + return false +} + +func (x *MacVerifyResponse) GetVerifiedMacCrc32C() bool { + if x != nil { + return x.VerifiedMacCrc32C + } + return false +} + +func (x *MacVerifyResponse) GetVerifiedSuccessIntegrity() bool { + if x != nil { + return x.VerifiedSuccessIntegrity + } + return false +} + +func (x *MacVerifyResponse) GetProtectionLevel() ProtectionLevel { + if x != nil { + return x.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +// Response message for +// [KeyManagementService.GenerateRandomBytes][google.cloud.kms.v1.KeyManagementService.GenerateRandomBytes]. +type GenerateRandomBytesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The generated data. + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + // Integrity verification field. A CRC32C checksum of the returned + // [GenerateRandomBytesResponse.data][google.cloud.kms.v1.GenerateRandomBytesResponse.data]. + // An integrity check of + // [GenerateRandomBytesResponse.data][google.cloud.kms.v1.GenerateRandomBytesResponse.data] + // can be performed by computing the CRC32C checksum of + // [GenerateRandomBytesResponse.data][google.cloud.kms.v1.GenerateRandomBytesResponse.data] + // and comparing your results to this field. Discard the response in case of + // non-matching checksum values, and perform a limited number of retries. A + // persistent mismatch may indicate an issue in your computation of the CRC32C + // checksum. Note: This field is defined as int64 for reasons of compatibility + // across different languages. However, it is a non-negative integer, which + // will never exceed 2^32-1, and can be safely downconverted to uint32 in + // languages that support this type. 
+ DataCrc32C *wrapperspb.Int64Value `protobuf:"bytes,3,opt,name=data_crc32c,json=dataCrc32c,proto3" json:"data_crc32c,omitempty"` +} + +func (x *GenerateRandomBytesResponse) Reset() { + *x = GenerateRandomBytesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GenerateRandomBytesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GenerateRandomBytesResponse) ProtoMessage() {} + +func (x *GenerateRandomBytesResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GenerateRandomBytesResponse.ProtoReflect.Descriptor instead. +func (*GenerateRandomBytesResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{36} +} + +func (x *GenerateRandomBytesResponse) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *GenerateRandomBytesResponse) GetDataCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.DataCrc32C + } + return nil +} + +// A [Digest][google.cloud.kms.v1.Digest] holds a cryptographic message digest. +type Digest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The message digest. + // + // Types that are assignable to Digest: + // + // *Digest_Sha256 + // *Digest_Sha384 + // *Digest_Sha512 + Digest isDigest_Digest `protobuf_oneof:"digest"` +} + +func (x *Digest) Reset() { + *x = Digest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Digest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Digest) ProtoMessage() {} + +func (x *Digest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Digest.ProtoReflect.Descriptor instead. +func (*Digest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{37} +} + +func (m *Digest) GetDigest() isDigest_Digest { + if m != nil { + return m.Digest + } + return nil +} + +func (x *Digest) GetSha256() []byte { + if x, ok := x.GetDigest().(*Digest_Sha256); ok { + return x.Sha256 + } + return nil +} + +func (x *Digest) GetSha384() []byte { + if x, ok := x.GetDigest().(*Digest_Sha384); ok { + return x.Sha384 + } + return nil +} + +func (x *Digest) GetSha512() []byte { + if x, ok := x.GetDigest().(*Digest_Sha512); ok { + return x.Sha512 + } + return nil +} + +type isDigest_Digest interface { + isDigest_Digest() +} + +type Digest_Sha256 struct { + // A message digest produced with the SHA-256 algorithm. + Sha256 []byte `protobuf:"bytes,1,opt,name=sha256,proto3,oneof"` +} + +type Digest_Sha384 struct { + // A message digest produced with the SHA-384 algorithm. 
+ Sha384 []byte `protobuf:"bytes,2,opt,name=sha384,proto3,oneof"` +} + +type Digest_Sha512 struct { + // A message digest produced with the SHA-512 algorithm. + Sha512 []byte `protobuf:"bytes,3,opt,name=sha512,proto3,oneof"` +} + +func (*Digest_Sha256) isDigest_Digest() {} + +func (*Digest_Sha384) isDigest_Digest() {} + +func (*Digest_Sha512) isDigest_Digest() {} + +// Cloud KMS metadata for the given +// [google.cloud.location.Location][google.cloud.location.Location]. +type LocationMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Indicates whether [CryptoKeys][google.cloud.kms.v1.CryptoKey] with + // [protection_level][google.cloud.kms.v1.CryptoKeyVersionTemplate.protection_level] + // [HSM][google.cloud.kms.v1.ProtectionLevel.HSM] can be created in this + // location. + HsmAvailable bool `protobuf:"varint,1,opt,name=hsm_available,json=hsmAvailable,proto3" json:"hsm_available,omitempty"` + // Indicates whether [CryptoKeys][google.cloud.kms.v1.CryptoKey] with + // [protection_level][google.cloud.kms.v1.CryptoKeyVersionTemplate.protection_level] + // [EXTERNAL][google.cloud.kms.v1.ProtectionLevel.EXTERNAL] can be created in + // this location. + EkmAvailable bool `protobuf:"varint,2,opt,name=ekm_available,json=ekmAvailable,proto3" json:"ekm_available,omitempty"` +} + +func (x *LocationMetadata) Reset() { + *x = LocationMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocationMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocationMetadata) ProtoMessage() {} + +func (x *LocationMetadata) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocationMetadata.ProtoReflect.Descriptor instead. 
+func (*LocationMetadata) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{38} +} + +func (x *LocationMetadata) GetHsmAvailable() bool { + if x != nil { + return x.HsmAvailable + } + return false +} + +func (x *LocationMetadata) GetEkmAvailable() bool { + if x != nil { + return x.EkmAvailable + } + return false +} + +var File_google_cloud_kms_v1_service_proto protoreflect.FileDescriptor + +var file_google_cloud_kms_v1_service_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6b, + 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x76, 0x31, + 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0xdb, 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x69, + 0x6e, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x23, 0x0a, 0x21, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x20, 0x0a, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, + 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x1e, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, + 0x22, 
0xba, 0x02, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, + 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x21, 0x0a, 0x1f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4b, 0x65, 0x79, 0x52, + 0x69, 0x6e, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, + 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x5d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x69, 0x65, + 0x77, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x56, + 0x69, 0x65, 0x77, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x56, 0x69, 0x65, 0x77, + 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, + 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x22, 0xb4, 0x02, + 0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, + 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, + 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x4e, 0x0a, 0x04, 0x76, 0x69, 0x65, 0x77, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x56, 0x69, 0x65, + 0x77, 0x52, 0x04, 0x76, 0x69, 0x65, 0x77, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x18, 0x05, 0x20, 0x01, 
0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x6f, 0x72, 0x64, + 0x65, 0x72, 0x42, 0x79, 0x22, 0xdb, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, + 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, + 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, + 0x42, 0x79, 0x22, 0x98, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x69, + 0x6e, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x09, 0x6b, + 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x52, 0x08, 0x6b, 0x65, + 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, + 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xa0, 0x01, + 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x0b, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0a, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, + 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, 
0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, + 0x22, 0xbd, 0x01, 0x0a, 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, + 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x55, 0x0a, 0x13, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, + 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, + 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, + 0x22, 0xa0, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, + 0x6f, 0x62, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x0b, 0x69, + 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, + 0x52, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x12, 0x26, 0x0a, 0x0f, + 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, + 0x69, 0x7a, 0x65, 0x22, 0x50, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x54, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x62, 0x0a, 0x1a, 0x47, + 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, 
0x41, 0x2a, 0x0a, + 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, + 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, + 0x5b, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x54, 0x0a, 0x13, + 0x47, 0x65, 0x74, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x22, 0xbc, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, + 0x52, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x23, + 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, + 0x67, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x52, + 0x69, 0x6e, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, + 0x67, 0x22, 0x89, 0x02, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, + 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4b, 0x65, + 0x79, 0x52, 0x69, 0x6e, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x27, 0x0a, + 0x0d, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x42, 0x0a, 0x0a, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 
0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x09, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x41, 0x0a, 0x1d, 0x73, 0x6b, + 0x69, 0x70, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x1a, 0x73, 0x6b, 0x69, 0x70, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbc, 0x01, + 0x0a, 0x1d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, + 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x12, 0x58, 0x0a, 0x12, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x10, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xbe, 0x03, 0x0a, + 0x1d, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, + 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x12, 0x5e, 0x0a, 0x12, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, + 0x41, 0x01, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x10, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x62, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, + 0x72, 0x69, 0x74, 0x68, 0x6d, 
0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, + 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x22, 0x0a, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, + 0x6a, 0x6f, 0x62, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, + 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x24, 0x0a, 0x0b, 0x77, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x12, + 0x34, 0x0a, 0x13, 0x72, 0x73, 0x61, 0x5f, 0x61, 0x65, 0x73, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x70, + 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, + 0x01, 0x48, 0x00, 0x52, 0x10, 0x72, 0x73, 0x61, 0x41, 0x65, 0x73, 0x57, 0x72, 0x61, 0x70, 0x70, + 0x65, 0x64, 0x4b, 0x65, 0x79, 0x42, 0x16, 0x0a, 0x14, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x22, 0xc6, 0x01, + 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, + 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, + 0x0a, 0x1f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, + 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x27, 0x0a, 0x0d, 0x69, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, + 0x49, 0x64, 0x12, 0x42, 0x0a, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x69, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x22, 0x9e, 0x01, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x42, 0x0a, 0x0a, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, + 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xbb, 0x01, 0x0a, 0x1d, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x58, 0x0a, 0x12, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x10, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, + 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x9d, 0x01, 0x0a, 0x24, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, + 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, + 0x15, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x12, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x66, 0x0a, 0x1e, 0x44, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, + 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x66, 0x0a, + 0x1e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xdb, 0x02, 0x0a, 0x0e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01, + 0x2a, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 
0x41, 0x02, 0x52, + 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x47, 0x0a, 0x1d, 0x61, 0x64, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1b, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, + 0x61, 0x74, 0x61, 0x12, 0x4b, 0x0a, 0x10, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, + 0x0f, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, + 0x12, 0x71, 0x0a, 0x24, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, + 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x21, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, + 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, + 0x33, 0x32, 0x63, 0x22, 0xff, 0x02, 0x0a, 0x0e, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, + 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, + 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x12, 0x47, 0x0a, 0x1d, 0x61, 0x64, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1b, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, + 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x11, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, + 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x10, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, + 0x32, 0x63, 0x12, 0x71, 0x0a, 0x24, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, + 0x61, 
0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x21, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x75, + 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x43, + 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xba, 0x02, 0x0a, 0x15, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, + 0x45, 0x0a, 0x0d, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, + 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x17, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, + 0x41, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, + 0x32, 0x63, 0x22, 0xd4, 0x01, 0x0a, 0x18, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, + 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, + 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x12, 0x4d, 0x0a, 0x11, 0x63, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 
0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, + 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xb2, 0x01, 0x0a, 0x0e, 0x4d, 0x61, + 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x41, 0x0a, 0x0b, 0x64, + 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x8c, + 0x02, 0x0a, 0x10, 0x4d, 0x61, 0x63, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x64, 0x61, + 0x74, 0x61, 0x12, 0x41, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, + 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, + 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x15, 0x0a, 0x03, 0x6d, 0x61, 0x63, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x03, 0x6d, 0x61, 0x63, 0x12, 0x3f, 0x0a, 0x0a, + 0x6d, 0x61, 0x63, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xac, 0x01, + 0x0a, 0x1a, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, + 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x42, 0x79, 0x74, 
0x65, 0x73, 0x12, 0x4f, 0x0a, 0x10, 0x70, + 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, + 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0xfe, 0x02, 0x0a, + 0x0f, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, + 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, + 0x74, 0x65, 0x78, 0x74, 0x12, 0x48, 0x0a, 0x11, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, + 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x63, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x3a, + 0x0a, 0x19, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x70, 0x6c, 0x61, 0x69, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x50, 0x6c, 0x61, 0x69, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x60, 0x0a, 0x2d, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x29, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x41, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x4f, 0x0a, 0x10, + 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, + 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, + 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0xeb, 0x01, + 0x0a, 0x0f, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, + 0x46, 0x0a, 0x10, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, + 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, + 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x70, 0x6c, 0x61, 0x69, 0x6e, 
0x74, 0x65, 0x78, + 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x73, 0x65, 0x64, 0x5f, + 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, + 0x73, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, + 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0xcb, 0x02, 0x0a, 0x16, + 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x12, 0x46, 0x0a, 0x10, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x73, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x34, 0x0a, 0x16, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x5f, + 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x43, 0x72, 0x63, 0x33, + 0x32, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x44, 0x61, + 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0x90, 0x02, 0x0a, 0x19, 0x41, 0x73, + 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x6c, 0x61, 0x69, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x46, 0x0a, 0x10, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x70, 0x6c, + 0x61, 
0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x3c, 0x0a, + 0x1a, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, + 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x18, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x43, 0x69, 0x70, 0x68, 0x65, + 0x72, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x4f, 0x0a, 0x10, 0x70, + 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, + 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0xf6, 0x01, 0x0a, + 0x0f, 0x4d, 0x61, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x03, 0x6d, 0x61, 0x63, 0x12, 0x3a, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x5f, 0x63, 0x72, + 0x63, 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, + 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x43, 0x72, 0x63, 0x33, + 0x32, 0x63, 0x12, 0x30, 0x0a, 0x14, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x64, + 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x12, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x43, 0x72, + 0x63, 0x33, 0x32, 0x63, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0xb2, 0x02, 0x0a, 0x11, 0x4d, 0x61, 0x63, 0x56, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x76, 0x65, 0x72, + 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, + 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, + 0x64, 0x44, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x2e, 0x0a, 0x13, 0x76, + 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6d, 0x61, 0x63, 0x5f, 0x63, 0x72, 0x63, 0x33, + 0x32, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x4d, 0x61, 0x63, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x3c, 0x0a, 0x1a, 0x76, + 0x65, 0x72, 0x69, 0x66, 0x69, 
0x65, 0x64, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, + 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x18, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x49, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x69, 0x74, 0x79, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, + 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0x6f, 0x0a, 0x1b, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3c, 0x0a, + 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x60, 0x0a, 0x06, 0x44, + 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x12, + 0x18, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, + 0x00, 0x52, 0x06, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x12, 0x18, 0x0a, 0x06, 0x73, 0x68, 0x61, + 0x35, 0x31, 0x32, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x73, 0x68, 0x61, + 0x35, 0x31, 0x32, 0x42, 0x08, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x5c, 0x0a, + 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x23, 0x0a, 0x0d, 0x68, 0x73, 0x6d, 0x5f, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, + 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x68, 0x73, 0x6d, 0x41, 0x76, 0x61, + 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6b, 0x6d, 0x5f, 0x61, 0x76, + 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x65, + 0x6b, 0x6d, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x32, 0x86, 0x2b, 0x0a, 0x14, + 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0xa2, 0x01, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, + 0x52, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, + 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, + 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x2e, 0x12, 0x2c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 
0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, + 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0xb5, 0x01, 0x0a, 0x0e, 0x4c, 0x69, + 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x2a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12, 0x39, 0x2f, + 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x12, 0xde, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x31, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, + 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x5e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x4d, 0x2f, 0x76, 0x31, 0x2f, + 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, + 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x12, 0xb5, 0x01, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x70, 0x6f, 0x72, + 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4a, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 
0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, + 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, + 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x8f, 0x01, 0x0a, 0x0a, 0x47, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x22, + 0x3b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x12, 0x2c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, + 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xa2, 0x01, 0x0a, + 0x0c, 0x47, 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x28, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x22, 0x48, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12, + 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0xcb, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, + 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x47, 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0x5c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x4d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, + 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0xc0, 
0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, + 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, + 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0x66, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x59, 0x12, 0x57, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, + 0x7d, 0x2f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0xda, 0x41, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0xa2, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, + 0x4a, 0x6f, 0x62, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x22, 0x48, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, + 0x2f, 0x2a, 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x2f, 0x2a, 0x7d, + 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xb6, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x69, + 0x6e, 0x67, 0x22, 0x5c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x22, 0x2c, 0x2f, 0x76, 0x31, 0x2f, + 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, + 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x3a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, + 0x6e, 0x67, 0xda, 0x41, 0x1b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6b, 0x65, 0x79, 0x5f, + 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x64, 0x2c, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, + 0x12, 0xcf, 0x01, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 
0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x22, 0x6f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x22, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, + 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x4b, 0x65, 0x79, 0x73, 0x3a, 0x0a, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, + 0xda, 0x41, 0x1f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x2c, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, + 0x65, 0x79, 0x12, 0xfb, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x85, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x63, 0x22, 0x4d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, + 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x3a, 0x12, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0xda, 0x41, 0x19, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0xd4, 0x01, 0x0a, 0x16, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, + 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x5f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x59, 0x22, 0x54, + 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x69, 0x6d, + 0x70, 0x6f, 0x72, 0x74, 0x3a, 0x01, 0x2a, 0x12, 0xcf, 0x01, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x2b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, + 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, + 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x22, 0x6f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, + 0x22, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, + 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x3a, 0x0a, 0x69, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0xda, 0x41, 0x1f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x2c, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x2c, 0x69, + 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x12, 0xd1, 0x01, 0x0a, 0x0f, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x2b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x22, 0x71, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x52, 0x32, 0x44, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, + 0x6b, 0x65, 0x79, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, + 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x0a, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x5f, 0x6b, 0x65, 0x79, 0xda, 0x41, 0x16, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, + 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0x93, 0x02, + 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 
0x6d, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x22, 0x9d, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x76, 0x32, 0x60, 0x2f, 0x76, + 0x31, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x12, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0xda, 0x41, 0x1e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, + 0x61, 0x73, 0x6b, 0x12, 0xf2, 0x01, 0x0a, 0x1d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x50, 0x72, 0x69, 0x6d, 0x61, + 0x72, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x22, 0x76, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x53, 0x22, 0x4e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, + 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, + 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, + 0x2f, 0x2a, 0x7d, 0x3a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x1a, 0x6e, 0x61, + 0x6d, 0x65, 0x2c, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x12, 0xde, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x73, + 0x74, 0x72, 0x6f, 0x79, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x74, 0x72, + 0x6f, 0x79, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x22, 0x67, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x5a, 0x22, 0x55, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, + 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, + 0x6f, 
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, + 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, + 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x3a, + 0x01, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xde, 0x01, 0x0a, 0x17, 0x52, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0x67, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x5a, 0x22, 0x55, 0x2f, 0x76, 0x31, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, + 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xb4, 0x01, 0x0a, 0x07, 0x45, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x5e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x22, 0x42, 0x2f, 0x76, 0x31, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, + 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0x3a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x3a, 0x01, 0x2a, + 0xda, 0x41, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x12, 0xb4, 0x01, 0x0a, 0x07, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0x23, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x46, + 0x22, 0x41, 0x2f, 0x76, 0x31, 
0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x64, 0x65, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x0f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x12, 0xe0, 0x01, 0x0a, 0x0e, 0x41, 0x73, 0x79, + 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x12, 0x2a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x73, + 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x75, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x61, 0x22, 0x5c, 0x2f, 0x76, + 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, + 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x61, 0x73, 0x79, 0x6d, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x0b, + 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0xf0, 0x01, 0x0a, 0x11, + 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x7c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x64, 0x22, 0x5f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, + 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, + 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, + 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x61, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x0f, 0x6e, + 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x12, 0xc2, + 0x01, 0x0a, 0x07, 0x4d, 0x61, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 
0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x4d, 0x61, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, + 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x5a, 0x22, 0x55, 0x2f, + 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, + 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x61, 0x63, + 0x53, 0x69, 0x67, 0x6e, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x64, + 0x61, 0x74, 0x61, 0x12, 0xce, 0x01, 0x0a, 0x09, 0x4d, 0x61, 0x63, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, + 0x61, 0x63, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x72, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x5c, 0x22, 0x57, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, + 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, + 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, + 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x61, 0x63, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x64, 0x61, 0x74, 0x61, + 0x2c, 0x6d, 0x61, 0x63, 0x12, 0xe7, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x2f, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e, 0x64, 0x6f, + 0x6d, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e, 0x64, + 0x6f, 0x6d, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x6d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3e, 0x22, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x26, 0x6c, 0x6f, 0x63, 0x61, 0x74, 
0x69, 0x6f, 0x6e, + 0x2c, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x2c, 0x70, 0x72, + 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x1a, 0x74, + 0xca, 0x41, 0x17, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x57, 0x68, 0x74, 0x74, + 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, + 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x6b, 0x6d, 0x73, 0x42, 0x8c, 0x01, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x42, 0x08, 0x4b, 0x6d, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x36, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x76, 0x31, + 0x3b, 0x6b, 0x6d, 0x73, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4b, 0x6d, 0x73, + 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_cloud_kms_v1_service_proto_rawDescOnce sync.Once + file_google_cloud_kms_v1_service_proto_rawDescData = file_google_cloud_kms_v1_service_proto_rawDesc +) + +func file_google_cloud_kms_v1_service_proto_rawDescGZIP() []byte { + file_google_cloud_kms_v1_service_proto_rawDescOnce.Do(func() { + file_google_cloud_kms_v1_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_cloud_kms_v1_service_proto_rawDescData) + }) + return file_google_cloud_kms_v1_service_proto_rawDescData +} + +var file_google_cloud_kms_v1_service_proto_msgTypes = make([]protoimpl.MessageInfo, 39) +var file_google_cloud_kms_v1_service_proto_goTypes = []interface{}{ + (*ListKeyRingsRequest)(nil), // 0: google.cloud.kms.v1.ListKeyRingsRequest + (*ListCryptoKeysRequest)(nil), // 1: google.cloud.kms.v1.ListCryptoKeysRequest + (*ListCryptoKeyVersionsRequest)(nil), // 2: google.cloud.kms.v1.ListCryptoKeyVersionsRequest + (*ListImportJobsRequest)(nil), // 3: google.cloud.kms.v1.ListImportJobsRequest + (*ListKeyRingsResponse)(nil), // 4: google.cloud.kms.v1.ListKeyRingsResponse + (*ListCryptoKeysResponse)(nil), // 5: google.cloud.kms.v1.ListCryptoKeysResponse + (*ListCryptoKeyVersionsResponse)(nil), // 6: google.cloud.kms.v1.ListCryptoKeyVersionsResponse + (*ListImportJobsResponse)(nil), // 7: google.cloud.kms.v1.ListImportJobsResponse + (*GetKeyRingRequest)(nil), // 8: google.cloud.kms.v1.GetKeyRingRequest + (*GetCryptoKeyRequest)(nil), // 9: google.cloud.kms.v1.GetCryptoKeyRequest + (*GetCryptoKeyVersionRequest)(nil), // 10: google.cloud.kms.v1.GetCryptoKeyVersionRequest + (*GetPublicKeyRequest)(nil), // 11: google.cloud.kms.v1.GetPublicKeyRequest + (*GetImportJobRequest)(nil), // 
12: google.cloud.kms.v1.GetImportJobRequest + (*CreateKeyRingRequest)(nil), // 13: google.cloud.kms.v1.CreateKeyRingRequest + (*CreateCryptoKeyRequest)(nil), // 14: google.cloud.kms.v1.CreateCryptoKeyRequest + (*CreateCryptoKeyVersionRequest)(nil), // 15: google.cloud.kms.v1.CreateCryptoKeyVersionRequest + (*ImportCryptoKeyVersionRequest)(nil), // 16: google.cloud.kms.v1.ImportCryptoKeyVersionRequest + (*CreateImportJobRequest)(nil), // 17: google.cloud.kms.v1.CreateImportJobRequest + (*UpdateCryptoKeyRequest)(nil), // 18: google.cloud.kms.v1.UpdateCryptoKeyRequest + (*UpdateCryptoKeyVersionRequest)(nil), // 19: google.cloud.kms.v1.UpdateCryptoKeyVersionRequest + (*UpdateCryptoKeyPrimaryVersionRequest)(nil), // 20: google.cloud.kms.v1.UpdateCryptoKeyPrimaryVersionRequest + (*DestroyCryptoKeyVersionRequest)(nil), // 21: google.cloud.kms.v1.DestroyCryptoKeyVersionRequest + (*RestoreCryptoKeyVersionRequest)(nil), // 22: google.cloud.kms.v1.RestoreCryptoKeyVersionRequest + (*EncryptRequest)(nil), // 23: google.cloud.kms.v1.EncryptRequest + (*DecryptRequest)(nil), // 24: google.cloud.kms.v1.DecryptRequest + (*AsymmetricSignRequest)(nil), // 25: google.cloud.kms.v1.AsymmetricSignRequest + (*AsymmetricDecryptRequest)(nil), // 26: google.cloud.kms.v1.AsymmetricDecryptRequest + (*MacSignRequest)(nil), // 27: google.cloud.kms.v1.MacSignRequest + (*MacVerifyRequest)(nil), // 28: google.cloud.kms.v1.MacVerifyRequest + (*GenerateRandomBytesRequest)(nil), // 29: google.cloud.kms.v1.GenerateRandomBytesRequest + (*EncryptResponse)(nil), // 30: google.cloud.kms.v1.EncryptResponse + (*DecryptResponse)(nil), // 31: google.cloud.kms.v1.DecryptResponse + (*AsymmetricSignResponse)(nil), // 32: google.cloud.kms.v1.AsymmetricSignResponse + (*AsymmetricDecryptResponse)(nil), // 33: google.cloud.kms.v1.AsymmetricDecryptResponse + (*MacSignResponse)(nil), // 34: google.cloud.kms.v1.MacSignResponse + (*MacVerifyResponse)(nil), // 35: google.cloud.kms.v1.MacVerifyResponse + (*GenerateRandomBytesResponse)(nil), // 36: google.cloud.kms.v1.GenerateRandomBytesResponse + (*Digest)(nil), // 37: google.cloud.kms.v1.Digest + (*LocationMetadata)(nil), // 38: google.cloud.kms.v1.LocationMetadata + (CryptoKeyVersion_CryptoKeyVersionView)(0), // 39: google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionView + (*KeyRing)(nil), // 40: google.cloud.kms.v1.KeyRing + (*CryptoKey)(nil), // 41: google.cloud.kms.v1.CryptoKey + (*CryptoKeyVersion)(nil), // 42: google.cloud.kms.v1.CryptoKeyVersion + (*ImportJob)(nil), // 43: google.cloud.kms.v1.ImportJob + (CryptoKeyVersion_CryptoKeyVersionAlgorithm)(0), // 44: google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm + (*fieldmaskpb.FieldMask)(nil), // 45: google.protobuf.FieldMask + (*wrapperspb.Int64Value)(nil), // 46: google.protobuf.Int64Value + (ProtectionLevel)(0), // 47: google.cloud.kms.v1.ProtectionLevel + (*PublicKey)(nil), // 48: google.cloud.kms.v1.PublicKey +} +var file_google_cloud_kms_v1_service_proto_depIdxs = []int32{ + 39, // 0: google.cloud.kms.v1.ListCryptoKeysRequest.version_view:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionView + 39, // 1: google.cloud.kms.v1.ListCryptoKeyVersionsRequest.view:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionView + 40, // 2: google.cloud.kms.v1.ListKeyRingsResponse.key_rings:type_name -> google.cloud.kms.v1.KeyRing + 41, // 3: google.cloud.kms.v1.ListCryptoKeysResponse.crypto_keys:type_name -> google.cloud.kms.v1.CryptoKey + 42, // 4: 
google.cloud.kms.v1.ListCryptoKeyVersionsResponse.crypto_key_versions:type_name -> google.cloud.kms.v1.CryptoKeyVersion + 43, // 5: google.cloud.kms.v1.ListImportJobsResponse.import_jobs:type_name -> google.cloud.kms.v1.ImportJob + 40, // 6: google.cloud.kms.v1.CreateKeyRingRequest.key_ring:type_name -> google.cloud.kms.v1.KeyRing + 41, // 7: google.cloud.kms.v1.CreateCryptoKeyRequest.crypto_key:type_name -> google.cloud.kms.v1.CryptoKey + 42, // 8: google.cloud.kms.v1.CreateCryptoKeyVersionRequest.crypto_key_version:type_name -> google.cloud.kms.v1.CryptoKeyVersion + 44, // 9: google.cloud.kms.v1.ImportCryptoKeyVersionRequest.algorithm:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm + 43, // 10: google.cloud.kms.v1.CreateImportJobRequest.import_job:type_name -> google.cloud.kms.v1.ImportJob + 41, // 11: google.cloud.kms.v1.UpdateCryptoKeyRequest.crypto_key:type_name -> google.cloud.kms.v1.CryptoKey + 45, // 12: google.cloud.kms.v1.UpdateCryptoKeyRequest.update_mask:type_name -> google.protobuf.FieldMask + 42, // 13: google.cloud.kms.v1.UpdateCryptoKeyVersionRequest.crypto_key_version:type_name -> google.cloud.kms.v1.CryptoKeyVersion + 45, // 14: google.cloud.kms.v1.UpdateCryptoKeyVersionRequest.update_mask:type_name -> google.protobuf.FieldMask + 46, // 15: google.cloud.kms.v1.EncryptRequest.plaintext_crc32c:type_name -> google.protobuf.Int64Value + 46, // 16: google.cloud.kms.v1.EncryptRequest.additional_authenticated_data_crc32c:type_name -> google.protobuf.Int64Value + 46, // 17: google.cloud.kms.v1.DecryptRequest.ciphertext_crc32c:type_name -> google.protobuf.Int64Value + 46, // 18: google.cloud.kms.v1.DecryptRequest.additional_authenticated_data_crc32c:type_name -> google.protobuf.Int64Value + 37, // 19: google.cloud.kms.v1.AsymmetricSignRequest.digest:type_name -> google.cloud.kms.v1.Digest + 46, // 20: google.cloud.kms.v1.AsymmetricSignRequest.digest_crc32c:type_name -> google.protobuf.Int64Value + 46, // 21: google.cloud.kms.v1.AsymmetricSignRequest.data_crc32c:type_name -> google.protobuf.Int64Value + 46, // 22: google.cloud.kms.v1.AsymmetricDecryptRequest.ciphertext_crc32c:type_name -> google.protobuf.Int64Value + 46, // 23: google.cloud.kms.v1.MacSignRequest.data_crc32c:type_name -> google.protobuf.Int64Value + 46, // 24: google.cloud.kms.v1.MacVerifyRequest.data_crc32c:type_name -> google.protobuf.Int64Value + 46, // 25: google.cloud.kms.v1.MacVerifyRequest.mac_crc32c:type_name -> google.protobuf.Int64Value + 47, // 26: google.cloud.kms.v1.GenerateRandomBytesRequest.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel + 46, // 27: google.cloud.kms.v1.EncryptResponse.ciphertext_crc32c:type_name -> google.protobuf.Int64Value + 47, // 28: google.cloud.kms.v1.EncryptResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel + 46, // 29: google.cloud.kms.v1.DecryptResponse.plaintext_crc32c:type_name -> google.protobuf.Int64Value + 47, // 30: google.cloud.kms.v1.DecryptResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel + 46, // 31: google.cloud.kms.v1.AsymmetricSignResponse.signature_crc32c:type_name -> google.protobuf.Int64Value + 47, // 32: google.cloud.kms.v1.AsymmetricSignResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel + 46, // 33: google.cloud.kms.v1.AsymmetricDecryptResponse.plaintext_crc32c:type_name -> google.protobuf.Int64Value + 47, // 34: google.cloud.kms.v1.AsymmetricDecryptResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel + 46, // 35: 
google.cloud.kms.v1.MacSignResponse.mac_crc32c:type_name -> google.protobuf.Int64Value + 47, // 36: google.cloud.kms.v1.MacSignResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel + 47, // 37: google.cloud.kms.v1.MacVerifyResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel + 46, // 38: google.cloud.kms.v1.GenerateRandomBytesResponse.data_crc32c:type_name -> google.protobuf.Int64Value + 0, // 39: google.cloud.kms.v1.KeyManagementService.ListKeyRings:input_type -> google.cloud.kms.v1.ListKeyRingsRequest + 1, // 40: google.cloud.kms.v1.KeyManagementService.ListCryptoKeys:input_type -> google.cloud.kms.v1.ListCryptoKeysRequest + 2, // 41: google.cloud.kms.v1.KeyManagementService.ListCryptoKeyVersions:input_type -> google.cloud.kms.v1.ListCryptoKeyVersionsRequest + 3, // 42: google.cloud.kms.v1.KeyManagementService.ListImportJobs:input_type -> google.cloud.kms.v1.ListImportJobsRequest + 8, // 43: google.cloud.kms.v1.KeyManagementService.GetKeyRing:input_type -> google.cloud.kms.v1.GetKeyRingRequest + 9, // 44: google.cloud.kms.v1.KeyManagementService.GetCryptoKey:input_type -> google.cloud.kms.v1.GetCryptoKeyRequest + 10, // 45: google.cloud.kms.v1.KeyManagementService.GetCryptoKeyVersion:input_type -> google.cloud.kms.v1.GetCryptoKeyVersionRequest + 11, // 46: google.cloud.kms.v1.KeyManagementService.GetPublicKey:input_type -> google.cloud.kms.v1.GetPublicKeyRequest + 12, // 47: google.cloud.kms.v1.KeyManagementService.GetImportJob:input_type -> google.cloud.kms.v1.GetImportJobRequest + 13, // 48: google.cloud.kms.v1.KeyManagementService.CreateKeyRing:input_type -> google.cloud.kms.v1.CreateKeyRingRequest + 14, // 49: google.cloud.kms.v1.KeyManagementService.CreateCryptoKey:input_type -> google.cloud.kms.v1.CreateCryptoKeyRequest + 15, // 50: google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion:input_type -> google.cloud.kms.v1.CreateCryptoKeyVersionRequest + 16, // 51: google.cloud.kms.v1.KeyManagementService.ImportCryptoKeyVersion:input_type -> google.cloud.kms.v1.ImportCryptoKeyVersionRequest + 17, // 52: google.cloud.kms.v1.KeyManagementService.CreateImportJob:input_type -> google.cloud.kms.v1.CreateImportJobRequest + 18, // 53: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKey:input_type -> google.cloud.kms.v1.UpdateCryptoKeyRequest + 19, // 54: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyVersion:input_type -> google.cloud.kms.v1.UpdateCryptoKeyVersionRequest + 20, // 55: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyPrimaryVersion:input_type -> google.cloud.kms.v1.UpdateCryptoKeyPrimaryVersionRequest + 21, // 56: google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion:input_type -> google.cloud.kms.v1.DestroyCryptoKeyVersionRequest + 22, // 57: google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion:input_type -> google.cloud.kms.v1.RestoreCryptoKeyVersionRequest + 23, // 58: google.cloud.kms.v1.KeyManagementService.Encrypt:input_type -> google.cloud.kms.v1.EncryptRequest + 24, // 59: google.cloud.kms.v1.KeyManagementService.Decrypt:input_type -> google.cloud.kms.v1.DecryptRequest + 25, // 60: google.cloud.kms.v1.KeyManagementService.AsymmetricSign:input_type -> google.cloud.kms.v1.AsymmetricSignRequest + 26, // 61: google.cloud.kms.v1.KeyManagementService.AsymmetricDecrypt:input_type -> google.cloud.kms.v1.AsymmetricDecryptRequest + 27, // 62: google.cloud.kms.v1.KeyManagementService.MacSign:input_type -> google.cloud.kms.v1.MacSignRequest + 28, // 63: 
google.cloud.kms.v1.KeyManagementService.MacVerify:input_type -> google.cloud.kms.v1.MacVerifyRequest + 29, // 64: google.cloud.kms.v1.KeyManagementService.GenerateRandomBytes:input_type -> google.cloud.kms.v1.GenerateRandomBytesRequest + 4, // 65: google.cloud.kms.v1.KeyManagementService.ListKeyRings:output_type -> google.cloud.kms.v1.ListKeyRingsResponse + 5, // 66: google.cloud.kms.v1.KeyManagementService.ListCryptoKeys:output_type -> google.cloud.kms.v1.ListCryptoKeysResponse + 6, // 67: google.cloud.kms.v1.KeyManagementService.ListCryptoKeyVersions:output_type -> google.cloud.kms.v1.ListCryptoKeyVersionsResponse + 7, // 68: google.cloud.kms.v1.KeyManagementService.ListImportJobs:output_type -> google.cloud.kms.v1.ListImportJobsResponse + 40, // 69: google.cloud.kms.v1.KeyManagementService.GetKeyRing:output_type -> google.cloud.kms.v1.KeyRing + 41, // 70: google.cloud.kms.v1.KeyManagementService.GetCryptoKey:output_type -> google.cloud.kms.v1.CryptoKey + 42, // 71: google.cloud.kms.v1.KeyManagementService.GetCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion + 48, // 72: google.cloud.kms.v1.KeyManagementService.GetPublicKey:output_type -> google.cloud.kms.v1.PublicKey + 43, // 73: google.cloud.kms.v1.KeyManagementService.GetImportJob:output_type -> google.cloud.kms.v1.ImportJob + 40, // 74: google.cloud.kms.v1.KeyManagementService.CreateKeyRing:output_type -> google.cloud.kms.v1.KeyRing + 41, // 75: google.cloud.kms.v1.KeyManagementService.CreateCryptoKey:output_type -> google.cloud.kms.v1.CryptoKey + 42, // 76: google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion + 42, // 77: google.cloud.kms.v1.KeyManagementService.ImportCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion + 43, // 78: google.cloud.kms.v1.KeyManagementService.CreateImportJob:output_type -> google.cloud.kms.v1.ImportJob + 41, // 79: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKey:output_type -> google.cloud.kms.v1.CryptoKey + 42, // 80: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion + 41, // 81: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyPrimaryVersion:output_type -> google.cloud.kms.v1.CryptoKey + 42, // 82: google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion + 42, // 83: google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion + 30, // 84: google.cloud.kms.v1.KeyManagementService.Encrypt:output_type -> google.cloud.kms.v1.EncryptResponse + 31, // 85: google.cloud.kms.v1.KeyManagementService.Decrypt:output_type -> google.cloud.kms.v1.DecryptResponse + 32, // 86: google.cloud.kms.v1.KeyManagementService.AsymmetricSign:output_type -> google.cloud.kms.v1.AsymmetricSignResponse + 33, // 87: google.cloud.kms.v1.KeyManagementService.AsymmetricDecrypt:output_type -> google.cloud.kms.v1.AsymmetricDecryptResponse + 34, // 88: google.cloud.kms.v1.KeyManagementService.MacSign:output_type -> google.cloud.kms.v1.MacSignResponse + 35, // 89: google.cloud.kms.v1.KeyManagementService.MacVerify:output_type -> google.cloud.kms.v1.MacVerifyResponse + 36, // 90: google.cloud.kms.v1.KeyManagementService.GenerateRandomBytes:output_type -> google.cloud.kms.v1.GenerateRandomBytesResponse + 65, // [65:91] is the sub-list for method output_type + 39, // [39:65] is the sub-list for method input_type + 39, // [39:39] is the sub-list for 
extension type_name + 39, // [39:39] is the sub-list for extension extendee + 0, // [0:39] is the sub-list for field type_name +} + +func init() { file_google_cloud_kms_v1_service_proto_init() } +func file_google_cloud_kms_v1_service_proto_init() { + if File_google_cloud_kms_v1_service_proto != nil { + return + } + file_google_cloud_kms_v1_resources_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_cloud_kms_v1_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListKeyRingsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListCryptoKeysRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListCryptoKeyVersionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListImportJobsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListKeyRingsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListCryptoKeysResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListCryptoKeyVersionsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListImportJobsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetKeyRingRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetCryptoKeyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetCryptoKeyVersionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[11].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*GetPublicKeyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetImportJobRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateKeyRingRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateCryptoKeyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateCryptoKeyVersionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportCryptoKeyVersionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateImportJobRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateCryptoKeyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateCryptoKeyVersionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateCryptoKeyPrimaryVersionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DestroyCryptoKeyVersionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RestoreCryptoKeyVersionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EncryptRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 
2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DecryptRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AsymmetricSignRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AsymmetricDecryptRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MacSignRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MacVerifyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GenerateRandomBytesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EncryptResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DecryptResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AsymmetricSignResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AsymmetricDecryptResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MacSignResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MacVerifyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GenerateRandomBytesResponse); i { + 
case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Digest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocationMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[16].OneofWrappers = []interface{}{ + (*ImportCryptoKeyVersionRequest_RsaAesWrappedKey)(nil), + } + file_google_cloud_kms_v1_service_proto_msgTypes[37].OneofWrappers = []interface{}{ + (*Digest_Sha256)(nil), + (*Digest_Sha384)(nil), + (*Digest_Sha512)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_cloud_kms_v1_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 39, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_cloud_kms_v1_service_proto_goTypes, + DependencyIndexes: file_google_cloud_kms_v1_service_proto_depIdxs, + MessageInfos: file_google_cloud_kms_v1_service_proto_msgTypes, + }.Build() + File_google_cloud_kms_v1_service_proto = out.File + file_google_cloud_kms_v1_service_proto_rawDesc = nil + file_google_cloud_kms_v1_service_proto_goTypes = nil + file_google_cloud_kms_v1_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// KeyManagementServiceClient is the client API for KeyManagementService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type KeyManagementServiceClient interface { + // Lists [KeyRings][google.cloud.kms.v1.KeyRing]. + ListKeyRings(ctx context.Context, in *ListKeyRingsRequest, opts ...grpc.CallOption) (*ListKeyRingsResponse, error) + // Lists [CryptoKeys][google.cloud.kms.v1.CryptoKey]. + ListCryptoKeys(ctx context.Context, in *ListCryptoKeysRequest, opts ...grpc.CallOption) (*ListCryptoKeysResponse, error) + // Lists [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion]. + ListCryptoKeyVersions(ctx context.Context, in *ListCryptoKeyVersionsRequest, opts ...grpc.CallOption) (*ListCryptoKeyVersionsResponse, error) + // Lists [ImportJobs][google.cloud.kms.v1.ImportJob]. + ListImportJobs(ctx context.Context, in *ListImportJobsRequest, opts ...grpc.CallOption) (*ListImportJobsResponse, error) + // Returns metadata for a given [KeyRing][google.cloud.kms.v1.KeyRing]. + GetKeyRing(ctx context.Context, in *GetKeyRingRequest, opts ...grpc.CallOption) (*KeyRing, error) + // Returns metadata for a given [CryptoKey][google.cloud.kms.v1.CryptoKey], as + // well as its [primary][google.cloud.kms.v1.CryptoKey.primary] + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. 
+ GetCryptoKey(ctx context.Context, in *GetCryptoKeyRequest, opts ...grpc.CallOption) (*CryptoKey, error) + // Returns metadata for a given + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + GetCryptoKeyVersion(ctx context.Context, in *GetCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) + // Returns the public key for the given + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. The + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be + // [ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN] + // or + // [ASYMMETRIC_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_DECRYPT]. + GetPublicKey(ctx context.Context, in *GetPublicKeyRequest, opts ...grpc.CallOption) (*PublicKey, error) + // Returns metadata for a given [ImportJob][google.cloud.kms.v1.ImportJob]. + GetImportJob(ctx context.Context, in *GetImportJobRequest, opts ...grpc.CallOption) (*ImportJob, error) + // Create a new [KeyRing][google.cloud.kms.v1.KeyRing] in a given Project and + // Location. + CreateKeyRing(ctx context.Context, in *CreateKeyRingRequest, opts ...grpc.CallOption) (*KeyRing, error) + // Create a new [CryptoKey][google.cloud.kms.v1.CryptoKey] within a + // [KeyRing][google.cloud.kms.v1.KeyRing]. + // + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] and + // [CryptoKey.version_template.algorithm][google.cloud.kms.v1.CryptoKeyVersionTemplate.algorithm] + // are required. + CreateCryptoKey(ctx context.Context, in *CreateCryptoKeyRequest, opts ...grpc.CallOption) (*CryptoKey, error) + // Create a new [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in a + // [CryptoKey][google.cloud.kms.v1.CryptoKey]. + // + // The server will assign the next sequential id. If unset, + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] will be set to + // [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED]. + CreateCryptoKeyVersion(ctx context.Context, in *CreateCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) + // Import wrapped key material into a + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + // + // All requests must specify a [CryptoKey][google.cloud.kms.v1.CryptoKey]. If + // a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] is additionally + // specified in the request, key material will be reimported into that + // version. Otherwise, a new version will be created, and will be assigned the + // next sequential id within the [CryptoKey][google.cloud.kms.v1.CryptoKey]. + ImportCryptoKeyVersion(ctx context.Context, in *ImportCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) + // Create a new [ImportJob][google.cloud.kms.v1.ImportJob] within a + // [KeyRing][google.cloud.kms.v1.KeyRing]. + // + // [ImportJob.import_method][google.cloud.kms.v1.ImportJob.import_method] is + // required. + CreateImportJob(ctx context.Context, in *CreateImportJobRequest, opts ...grpc.CallOption) (*ImportJob, error) + // Update a [CryptoKey][google.cloud.kms.v1.CryptoKey]. + UpdateCryptoKey(ctx context.Context, in *UpdateCryptoKeyRequest, opts ...grpc.CallOption) (*CryptoKey, error) + // Update a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]'s + // metadata. 
+ // + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] may be changed between + // [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED] + // and + // [DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED] + // using this method. See + // [DestroyCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion] + // and + // [RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion] + // to move between other states. + UpdateCryptoKeyVersion(ctx context.Context, in *UpdateCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) + // Update the version of a [CryptoKey][google.cloud.kms.v1.CryptoKey] that + // will be used in + // [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. + // + // Returns an error if called on a key whose purpose is not + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. + UpdateCryptoKeyPrimaryVersion(ctx context.Context, in *UpdateCryptoKeyPrimaryVersionRequest, opts ...grpc.CallOption) (*CryptoKey, error) + // Schedule a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] for + // destruction. + // + // Upon calling this method, + // [CryptoKeyVersion.state][google.cloud.kms.v1.CryptoKeyVersion.state] will + // be set to + // [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED], + // and [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] will + // be set to the time + // [destroy_scheduled_duration][google.cloud.kms.v1.CryptoKey.destroy_scheduled_duration] + // in the future. At that time, the + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] will automatically + // change to + // [DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED], + // and the key material will be irrevocably destroyed. + // + // Before the + // [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] is + // reached, + // [RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion] + // may be called to reverse the process. + DestroyCryptoKeyVersion(ctx context.Context, in *DestroyCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) + // Restore a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in the + // [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED] + // state. + // + // Upon restoration of the CryptoKeyVersion, + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] will be set to + // [DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED], + // and [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] will + // be cleared. + RestoreCryptoKeyVersion(ctx context.Context, in *RestoreCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) + // Encrypts data, so that it can only be recovered by a call to + // [Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt]. The + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. + Encrypt(ctx context.Context, in *EncryptRequest, opts ...grpc.CallOption) (*EncryptResponse, error) + // Decrypts data that was protected by + // [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. 
The + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. + Decrypt(ctx context.Context, in *DecryptRequest, opts ...grpc.CallOption) (*DecryptResponse, error) + // Signs data using a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] + // with [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] + // ASYMMETRIC_SIGN, producing a signature that can be verified with the public + // key retrieved from + // [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]. + AsymmetricSign(ctx context.Context, in *AsymmetricSignRequest, opts ...grpc.CallOption) (*AsymmetricSignResponse, error) + // Decrypts data that was encrypted with a public key retrieved from + // [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey] + // corresponding to a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] + // with [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] + // ASYMMETRIC_DECRYPT. + AsymmetricDecrypt(ctx context.Context, in *AsymmetricDecryptRequest, opts ...grpc.CallOption) (*AsymmetricDecryptResponse, error) + // Signs data using a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] + // with [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] MAC, + // producing a tag that can be verified by another source with the same key. + MacSign(ctx context.Context, in *MacSignRequest, opts ...grpc.CallOption) (*MacSignResponse, error) + // Verifies MAC tag using a + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] MAC, and returns + // a response that indicates whether or not the verification was successful. + MacVerify(ctx context.Context, in *MacVerifyRequest, opts ...grpc.CallOption) (*MacVerifyResponse, error) + // Generate random bytes using the Cloud KMS randomness source in the provided + // location. + GenerateRandomBytes(ctx context.Context, in *GenerateRandomBytesRequest, opts ...grpc.CallOption) (*GenerateRandomBytesResponse, error) +} + +type keyManagementServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewKeyManagementServiceClient(cc grpc.ClientConnInterface) KeyManagementServiceClient { + return &keyManagementServiceClient{cc} +} + +func (c *keyManagementServiceClient) ListKeyRings(ctx context.Context, in *ListKeyRingsRequest, opts ...grpc.CallOption) (*ListKeyRingsResponse, error) { + out := new(ListKeyRingsResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/ListKeyRings", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) ListCryptoKeys(ctx context.Context, in *ListCryptoKeysRequest, opts ...grpc.CallOption) (*ListCryptoKeysResponse, error) { + out := new(ListCryptoKeysResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/ListCryptoKeys", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) ListCryptoKeyVersions(ctx context.Context, in *ListCryptoKeyVersionsRequest, opts ...grpc.CallOption) (*ListCryptoKeyVersionsResponse, error) { + out := new(ListCryptoKeyVersionsResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/ListCryptoKeyVersions", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) ListImportJobs(ctx context.Context, in *ListImportJobsRequest, opts ...grpc.CallOption) (*ListImportJobsResponse, error) { + out := new(ListImportJobsResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/ListImportJobs", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) GetKeyRing(ctx context.Context, in *GetKeyRingRequest, opts ...grpc.CallOption) (*KeyRing, error) { + out := new(KeyRing) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/GetKeyRing", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) GetCryptoKey(ctx context.Context, in *GetCryptoKeyRequest, opts ...grpc.CallOption) (*CryptoKey, error) { + out := new(CryptoKey) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/GetCryptoKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) GetCryptoKeyVersion(ctx context.Context, in *GetCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) { + out := new(CryptoKeyVersion) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/GetCryptoKeyVersion", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) GetPublicKey(ctx context.Context, in *GetPublicKeyRequest, opts ...grpc.CallOption) (*PublicKey, error) { + out := new(PublicKey) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/GetPublicKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) GetImportJob(ctx context.Context, in *GetImportJobRequest, opts ...grpc.CallOption) (*ImportJob, error) { + out := new(ImportJob) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/GetImportJob", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) CreateKeyRing(ctx context.Context, in *CreateKeyRingRequest, opts ...grpc.CallOption) (*KeyRing, error) { + out := new(KeyRing) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/CreateKeyRing", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) CreateCryptoKey(ctx context.Context, in *CreateCryptoKeyRequest, opts ...grpc.CallOption) (*CryptoKey, error) { + out := new(CryptoKey) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/CreateCryptoKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) CreateCryptoKeyVersion(ctx context.Context, in *CreateCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) { + out := new(CryptoKeyVersion) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/CreateCryptoKeyVersion", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) ImportCryptoKeyVersion(ctx context.Context, in *ImportCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) { + out := new(CryptoKeyVersion) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/ImportCryptoKeyVersion", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) CreateImportJob(ctx context.Context, in *CreateImportJobRequest, opts ...grpc.CallOption) (*ImportJob, error) { + out := new(ImportJob) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/CreateImportJob", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) UpdateCryptoKey(ctx context.Context, in *UpdateCryptoKeyRequest, opts ...grpc.CallOption) (*CryptoKey, error) { + out := new(CryptoKey) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) UpdateCryptoKeyVersion(ctx context.Context, in *UpdateCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) { + out := new(CryptoKeyVersion) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKeyVersion", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) UpdateCryptoKeyPrimaryVersion(ctx context.Context, in *UpdateCryptoKeyPrimaryVersionRequest, opts ...grpc.CallOption) (*CryptoKey, error) { + out := new(CryptoKey) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKeyPrimaryVersion", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) DestroyCryptoKeyVersion(ctx context.Context, in *DestroyCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) { + out := new(CryptoKeyVersion) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/DestroyCryptoKeyVersion", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) RestoreCryptoKeyVersion(ctx context.Context, in *RestoreCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) { + out := new(CryptoKeyVersion) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/RestoreCryptoKeyVersion", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) Encrypt(ctx context.Context, in *EncryptRequest, opts ...grpc.CallOption) (*EncryptResponse, error) { + out := new(EncryptResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/Encrypt", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) Decrypt(ctx context.Context, in *DecryptRequest, opts ...grpc.CallOption) (*DecryptResponse, error) { + out := new(DecryptResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/Decrypt", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) AsymmetricSign(ctx context.Context, in *AsymmetricSignRequest, opts ...grpc.CallOption) (*AsymmetricSignResponse, error) { + out := new(AsymmetricSignResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/AsymmetricSign", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) AsymmetricDecrypt(ctx context.Context, in *AsymmetricDecryptRequest, opts ...grpc.CallOption) (*AsymmetricDecryptResponse, error) { + out := new(AsymmetricDecryptResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/AsymmetricDecrypt", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) MacSign(ctx context.Context, in *MacSignRequest, opts ...grpc.CallOption) (*MacSignResponse, error) { + out := new(MacSignResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/MacSign", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) MacVerify(ctx context.Context, in *MacVerifyRequest, opts ...grpc.CallOption) (*MacVerifyResponse, error) { + out := new(MacVerifyResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/MacVerify", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) GenerateRandomBytes(ctx context.Context, in *GenerateRandomBytesRequest, opts ...grpc.CallOption) (*GenerateRandomBytesResponse, error) { + out := new(GenerateRandomBytesResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/GenerateRandomBytes", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// KeyManagementServiceServer is the server API for KeyManagementService service. +type KeyManagementServiceServer interface { + // Lists [KeyRings][google.cloud.kms.v1.KeyRing]. + ListKeyRings(context.Context, *ListKeyRingsRequest) (*ListKeyRingsResponse, error) + // Lists [CryptoKeys][google.cloud.kms.v1.CryptoKey]. + ListCryptoKeys(context.Context, *ListCryptoKeysRequest) (*ListCryptoKeysResponse, error) + // Lists [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion]. + ListCryptoKeyVersions(context.Context, *ListCryptoKeyVersionsRequest) (*ListCryptoKeyVersionsResponse, error) + // Lists [ImportJobs][google.cloud.kms.v1.ImportJob]. + ListImportJobs(context.Context, *ListImportJobsRequest) (*ListImportJobsResponse, error) + // Returns metadata for a given [KeyRing][google.cloud.kms.v1.KeyRing]. + GetKeyRing(context.Context, *GetKeyRingRequest) (*KeyRing, error) + // Returns metadata for a given [CryptoKey][google.cloud.kms.v1.CryptoKey], as + // well as its [primary][google.cloud.kms.v1.CryptoKey.primary] + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + GetCryptoKey(context.Context, *GetCryptoKeyRequest) (*CryptoKey, error) + // Returns metadata for a given + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + GetCryptoKeyVersion(context.Context, *GetCryptoKeyVersionRequest) (*CryptoKeyVersion, error) + // Returns the public key for the given + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. The + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be + // [ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN] + // or + // [ASYMMETRIC_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_DECRYPT]. + GetPublicKey(context.Context, *GetPublicKeyRequest) (*PublicKey, error) + // Returns metadata for a given [ImportJob][google.cloud.kms.v1.ImportJob]. + GetImportJob(context.Context, *GetImportJobRequest) (*ImportJob, error) + // Create a new [KeyRing][google.cloud.kms.v1.KeyRing] in a given Project and + // Location. 
+ CreateKeyRing(context.Context, *CreateKeyRingRequest) (*KeyRing, error) + // Create a new [CryptoKey][google.cloud.kms.v1.CryptoKey] within a + // [KeyRing][google.cloud.kms.v1.KeyRing]. + // + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] and + // [CryptoKey.version_template.algorithm][google.cloud.kms.v1.CryptoKeyVersionTemplate.algorithm] + // are required. + CreateCryptoKey(context.Context, *CreateCryptoKeyRequest) (*CryptoKey, error) + // Create a new [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in a + // [CryptoKey][google.cloud.kms.v1.CryptoKey]. + // + // The server will assign the next sequential id. If unset, + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] will be set to + // [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED]. + CreateCryptoKeyVersion(context.Context, *CreateCryptoKeyVersionRequest) (*CryptoKeyVersion, error) + // Import wrapped key material into a + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + // + // All requests must specify a [CryptoKey][google.cloud.kms.v1.CryptoKey]. If + // a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] is additionally + // specified in the request, key material will be reimported into that + // version. Otherwise, a new version will be created, and will be assigned the + // next sequential id within the [CryptoKey][google.cloud.kms.v1.CryptoKey]. + ImportCryptoKeyVersion(context.Context, *ImportCryptoKeyVersionRequest) (*CryptoKeyVersion, error) + // Create a new [ImportJob][google.cloud.kms.v1.ImportJob] within a + // [KeyRing][google.cloud.kms.v1.KeyRing]. + // + // [ImportJob.import_method][google.cloud.kms.v1.ImportJob.import_method] is + // required. + CreateImportJob(context.Context, *CreateImportJobRequest) (*ImportJob, error) + // Update a [CryptoKey][google.cloud.kms.v1.CryptoKey]. + UpdateCryptoKey(context.Context, *UpdateCryptoKeyRequest) (*CryptoKey, error) + // Update a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]'s + // metadata. + // + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] may be changed between + // [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED] + // and + // [DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED] + // using this method. See + // [DestroyCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion] + // and + // [RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion] + // to move between other states. + UpdateCryptoKeyVersion(context.Context, *UpdateCryptoKeyVersionRequest) (*CryptoKeyVersion, error) + // Update the version of a [CryptoKey][google.cloud.kms.v1.CryptoKey] that + // will be used in + // [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. + // + // Returns an error if called on a key whose purpose is not + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. + UpdateCryptoKeyPrimaryVersion(context.Context, *UpdateCryptoKeyPrimaryVersionRequest) (*CryptoKey, error) + // Schedule a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] for + // destruction. 
+ // + // Upon calling this method, + // [CryptoKeyVersion.state][google.cloud.kms.v1.CryptoKeyVersion.state] will + // be set to + // [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED], + // and [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] will + // be set to the time + // [destroy_scheduled_duration][google.cloud.kms.v1.CryptoKey.destroy_scheduled_duration] + // in the future. At that time, the + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] will automatically + // change to + // [DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED], + // and the key material will be irrevocably destroyed. + // + // Before the + // [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] is + // reached, + // [RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion] + // may be called to reverse the process. + DestroyCryptoKeyVersion(context.Context, *DestroyCryptoKeyVersionRequest) (*CryptoKeyVersion, error) + // Restore a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in the + // [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED] + // state. + // + // Upon restoration of the CryptoKeyVersion, + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] will be set to + // [DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED], + // and [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] will + // be cleared. + RestoreCryptoKeyVersion(context.Context, *RestoreCryptoKeyVersionRequest) (*CryptoKeyVersion, error) + // Encrypts data, so that it can only be recovered by a call to + // [Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt]. The + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. + Encrypt(context.Context, *EncryptRequest) (*EncryptResponse, error) + // Decrypts data that was protected by + // [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. The + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. + Decrypt(context.Context, *DecryptRequest) (*DecryptResponse, error) + // Signs data using a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] + // with [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] + // ASYMMETRIC_SIGN, producing a signature that can be verified with the public + // key retrieved from + // [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]. + AsymmetricSign(context.Context, *AsymmetricSignRequest) (*AsymmetricSignResponse, error) + // Decrypts data that was encrypted with a public key retrieved from + // [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey] + // corresponding to a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] + // with [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] + // ASYMMETRIC_DECRYPT. + AsymmetricDecrypt(context.Context, *AsymmetricDecryptRequest) (*AsymmetricDecryptResponse, error) + // Signs data using a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] + // with [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] MAC, + // producing a tag that can be verified by another source with the same key. 
+ MacSign(context.Context, *MacSignRequest) (*MacSignResponse, error) + // Verifies MAC tag using a + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] MAC, and returns + // a response that indicates whether or not the verification was successful. + MacVerify(context.Context, *MacVerifyRequest) (*MacVerifyResponse, error) + // Generate random bytes using the Cloud KMS randomness source in the provided + // location. + GenerateRandomBytes(context.Context, *GenerateRandomBytesRequest) (*GenerateRandomBytesResponse, error) +} + +// UnimplementedKeyManagementServiceServer can be embedded to have forward compatible implementations. +type UnimplementedKeyManagementServiceServer struct { +} + +func (*UnimplementedKeyManagementServiceServer) ListKeyRings(context.Context, *ListKeyRingsRequest) (*ListKeyRingsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListKeyRings not implemented") +} +func (*UnimplementedKeyManagementServiceServer) ListCryptoKeys(context.Context, *ListCryptoKeysRequest) (*ListCryptoKeysResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListCryptoKeys not implemented") +} +func (*UnimplementedKeyManagementServiceServer) ListCryptoKeyVersions(context.Context, *ListCryptoKeyVersionsRequest) (*ListCryptoKeyVersionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListCryptoKeyVersions not implemented") +} +func (*UnimplementedKeyManagementServiceServer) ListImportJobs(context.Context, *ListImportJobsRequest) (*ListImportJobsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListImportJobs not implemented") +} +func (*UnimplementedKeyManagementServiceServer) GetKeyRing(context.Context, *GetKeyRingRequest) (*KeyRing, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetKeyRing not implemented") +} +func (*UnimplementedKeyManagementServiceServer) GetCryptoKey(context.Context, *GetCryptoKeyRequest) (*CryptoKey, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetCryptoKey not implemented") +} +func (*UnimplementedKeyManagementServiceServer) GetCryptoKeyVersion(context.Context, *GetCryptoKeyVersionRequest) (*CryptoKeyVersion, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetCryptoKeyVersion not implemented") +} +func (*UnimplementedKeyManagementServiceServer) GetPublicKey(context.Context, *GetPublicKeyRequest) (*PublicKey, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPublicKey not implemented") +} +func (*UnimplementedKeyManagementServiceServer) GetImportJob(context.Context, *GetImportJobRequest) (*ImportJob, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetImportJob not implemented") +} +func (*UnimplementedKeyManagementServiceServer) CreateKeyRing(context.Context, *CreateKeyRingRequest) (*KeyRing, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateKeyRing not implemented") +} +func (*UnimplementedKeyManagementServiceServer) CreateCryptoKey(context.Context, *CreateCryptoKeyRequest) (*CryptoKey, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateCryptoKey not implemented") +} +func (*UnimplementedKeyManagementServiceServer) CreateCryptoKeyVersion(context.Context, *CreateCryptoKeyVersionRequest) (*CryptoKeyVersion, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateCryptoKeyVersion not implemented") +} +func (*UnimplementedKeyManagementServiceServer) 
ImportCryptoKeyVersion(context.Context, *ImportCryptoKeyVersionRequest) (*CryptoKeyVersion, error) { + return nil, status.Errorf(codes.Unimplemented, "method ImportCryptoKeyVersion not implemented") +} +func (*UnimplementedKeyManagementServiceServer) CreateImportJob(context.Context, *CreateImportJobRequest) (*ImportJob, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateImportJob not implemented") +} +func (*UnimplementedKeyManagementServiceServer) UpdateCryptoKey(context.Context, *UpdateCryptoKeyRequest) (*CryptoKey, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateCryptoKey not implemented") +} +func (*UnimplementedKeyManagementServiceServer) UpdateCryptoKeyVersion(context.Context, *UpdateCryptoKeyVersionRequest) (*CryptoKeyVersion, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateCryptoKeyVersion not implemented") +} +func (*UnimplementedKeyManagementServiceServer) UpdateCryptoKeyPrimaryVersion(context.Context, *UpdateCryptoKeyPrimaryVersionRequest) (*CryptoKey, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateCryptoKeyPrimaryVersion not implemented") +} +func (*UnimplementedKeyManagementServiceServer) DestroyCryptoKeyVersion(context.Context, *DestroyCryptoKeyVersionRequest) (*CryptoKeyVersion, error) { + return nil, status.Errorf(codes.Unimplemented, "method DestroyCryptoKeyVersion not implemented") +} +func (*UnimplementedKeyManagementServiceServer) RestoreCryptoKeyVersion(context.Context, *RestoreCryptoKeyVersionRequest) (*CryptoKeyVersion, error) { + return nil, status.Errorf(codes.Unimplemented, "method RestoreCryptoKeyVersion not implemented") +} +func (*UnimplementedKeyManagementServiceServer) Encrypt(context.Context, *EncryptRequest) (*EncryptResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Encrypt not implemented") +} +func (*UnimplementedKeyManagementServiceServer) Decrypt(context.Context, *DecryptRequest) (*DecryptResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Decrypt not implemented") +} +func (*UnimplementedKeyManagementServiceServer) AsymmetricSign(context.Context, *AsymmetricSignRequest) (*AsymmetricSignResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AsymmetricSign not implemented") +} +func (*UnimplementedKeyManagementServiceServer) AsymmetricDecrypt(context.Context, *AsymmetricDecryptRequest) (*AsymmetricDecryptResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AsymmetricDecrypt not implemented") +} +func (*UnimplementedKeyManagementServiceServer) MacSign(context.Context, *MacSignRequest) (*MacSignResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MacSign not implemented") +} +func (*UnimplementedKeyManagementServiceServer) MacVerify(context.Context, *MacVerifyRequest) (*MacVerifyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MacVerify not implemented") +} +func (*UnimplementedKeyManagementServiceServer) GenerateRandomBytes(context.Context, *GenerateRandomBytesRequest) (*GenerateRandomBytesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GenerateRandomBytes not implemented") +} + +func RegisterKeyManagementServiceServer(s *grpc.Server, srv KeyManagementServiceServer) { + s.RegisterService(&_KeyManagementService_serviceDesc, srv) +} + +func _KeyManagementService_ListKeyRings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { + in := new(ListKeyRingsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).ListKeyRings(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/ListKeyRings", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).ListKeyRings(ctx, req.(*ListKeyRingsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_ListCryptoKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListCryptoKeysRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).ListCryptoKeys(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/ListCryptoKeys", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).ListCryptoKeys(ctx, req.(*ListCryptoKeysRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_ListCryptoKeyVersions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListCryptoKeyVersionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).ListCryptoKeyVersions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/ListCryptoKeyVersions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).ListCryptoKeyVersions(ctx, req.(*ListCryptoKeyVersionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_ListImportJobs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListImportJobsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).ListImportJobs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/ListImportJobs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).ListImportJobs(ctx, req.(*ListImportJobsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_GetKeyRing_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetKeyRingRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).GetKeyRing(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/GetKeyRing", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).GetKeyRing(ctx, req.(*GetKeyRingRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_GetCryptoKey_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCryptoKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).GetCryptoKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/GetCryptoKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).GetCryptoKey(ctx, req.(*GetCryptoKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_GetCryptoKeyVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCryptoKeyVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).GetCryptoKeyVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/GetCryptoKeyVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).GetCryptoKeyVersion(ctx, req.(*GetCryptoKeyVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_GetPublicKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPublicKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).GetPublicKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/GetPublicKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).GetPublicKey(ctx, req.(*GetPublicKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_GetImportJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetImportJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).GetImportJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/GetImportJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).GetImportJob(ctx, req.(*GetImportJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_CreateKeyRing_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateKeyRingRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).CreateKeyRing(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/CreateKeyRing", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).CreateKeyRing(ctx, req.(*CreateKeyRingRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_CreateCryptoKey_Handler(srv 
interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateCryptoKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).CreateCryptoKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/CreateCryptoKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).CreateCryptoKey(ctx, req.(*CreateCryptoKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_CreateCryptoKeyVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateCryptoKeyVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).CreateCryptoKeyVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/CreateCryptoKeyVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).CreateCryptoKeyVersion(ctx, req.(*CreateCryptoKeyVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_ImportCryptoKeyVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ImportCryptoKeyVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).ImportCryptoKeyVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/ImportCryptoKeyVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).ImportCryptoKeyVersion(ctx, req.(*ImportCryptoKeyVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_CreateImportJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateImportJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).CreateImportJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/CreateImportJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).CreateImportJob(ctx, req.(*CreateImportJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_UpdateCryptoKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateCryptoKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).UpdateCryptoKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(KeyManagementServiceServer).UpdateCryptoKey(ctx, req.(*UpdateCryptoKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_UpdateCryptoKeyVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateCryptoKeyVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).UpdateCryptoKeyVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKeyVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).UpdateCryptoKeyVersion(ctx, req.(*UpdateCryptoKeyVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_UpdateCryptoKeyPrimaryVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateCryptoKeyPrimaryVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).UpdateCryptoKeyPrimaryVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKeyPrimaryVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).UpdateCryptoKeyPrimaryVersion(ctx, req.(*UpdateCryptoKeyPrimaryVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_DestroyCryptoKeyVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DestroyCryptoKeyVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).DestroyCryptoKeyVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/DestroyCryptoKeyVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).DestroyCryptoKeyVersion(ctx, req.(*DestroyCryptoKeyVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_RestoreCryptoKeyVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RestoreCryptoKeyVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).RestoreCryptoKeyVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/RestoreCryptoKeyVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).RestoreCryptoKeyVersion(ctx, req.(*RestoreCryptoKeyVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_Encrypt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EncryptRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + 
return srv.(KeyManagementServiceServer).Encrypt(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/Encrypt", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).Encrypt(ctx, req.(*EncryptRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_Decrypt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DecryptRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).Decrypt(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/Decrypt", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).Decrypt(ctx, req.(*DecryptRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_AsymmetricSign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AsymmetricSignRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).AsymmetricSign(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/AsymmetricSign", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).AsymmetricSign(ctx, req.(*AsymmetricSignRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_AsymmetricDecrypt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AsymmetricDecryptRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).AsymmetricDecrypt(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/AsymmetricDecrypt", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).AsymmetricDecrypt(ctx, req.(*AsymmetricDecryptRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_MacSign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MacSignRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).MacSign(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/MacSign", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).MacSign(ctx, req.(*MacSignRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_MacVerify_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MacVerifyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).MacVerify(ctx, in) 
+ } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/MacVerify", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).MacVerify(ctx, req.(*MacVerifyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_GenerateRandomBytes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GenerateRandomBytesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).GenerateRandomBytes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/GenerateRandomBytes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).GenerateRandomBytes(ctx, req.(*GenerateRandomBytesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _KeyManagementService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.kms.v1.KeyManagementService", + HandlerType: (*KeyManagementServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListKeyRings", + Handler: _KeyManagementService_ListKeyRings_Handler, + }, + { + MethodName: "ListCryptoKeys", + Handler: _KeyManagementService_ListCryptoKeys_Handler, + }, + { + MethodName: "ListCryptoKeyVersions", + Handler: _KeyManagementService_ListCryptoKeyVersions_Handler, + }, + { + MethodName: "ListImportJobs", + Handler: _KeyManagementService_ListImportJobs_Handler, + }, + { + MethodName: "GetKeyRing", + Handler: _KeyManagementService_GetKeyRing_Handler, + }, + { + MethodName: "GetCryptoKey", + Handler: _KeyManagementService_GetCryptoKey_Handler, + }, + { + MethodName: "GetCryptoKeyVersion", + Handler: _KeyManagementService_GetCryptoKeyVersion_Handler, + }, + { + MethodName: "GetPublicKey", + Handler: _KeyManagementService_GetPublicKey_Handler, + }, + { + MethodName: "GetImportJob", + Handler: _KeyManagementService_GetImportJob_Handler, + }, + { + MethodName: "CreateKeyRing", + Handler: _KeyManagementService_CreateKeyRing_Handler, + }, + { + MethodName: "CreateCryptoKey", + Handler: _KeyManagementService_CreateCryptoKey_Handler, + }, + { + MethodName: "CreateCryptoKeyVersion", + Handler: _KeyManagementService_CreateCryptoKeyVersion_Handler, + }, + { + MethodName: "ImportCryptoKeyVersion", + Handler: _KeyManagementService_ImportCryptoKeyVersion_Handler, + }, + { + MethodName: "CreateImportJob", + Handler: _KeyManagementService_CreateImportJob_Handler, + }, + { + MethodName: "UpdateCryptoKey", + Handler: _KeyManagementService_UpdateCryptoKey_Handler, + }, + { + MethodName: "UpdateCryptoKeyVersion", + Handler: _KeyManagementService_UpdateCryptoKeyVersion_Handler, + }, + { + MethodName: "UpdateCryptoKeyPrimaryVersion", + Handler: _KeyManagementService_UpdateCryptoKeyPrimaryVersion_Handler, + }, + { + MethodName: "DestroyCryptoKeyVersion", + Handler: _KeyManagementService_DestroyCryptoKeyVersion_Handler, + }, + { + MethodName: "RestoreCryptoKeyVersion", + Handler: _KeyManagementService_RestoreCryptoKeyVersion_Handler, + }, + { + MethodName: "Encrypt", + Handler: _KeyManagementService_Encrypt_Handler, + }, + { + MethodName: "Decrypt", + Handler: _KeyManagementService_Decrypt_Handler, + }, + { + MethodName: "AsymmetricSign", + Handler: _KeyManagementService_AsymmetricSign_Handler, + }, + { + MethodName: 
"AsymmetricDecrypt", + Handler: _KeyManagementService_AsymmetricDecrypt_Handler, + }, + { + MethodName: "MacSign", + Handler: _KeyManagementService_MacSign_Handler, + }, + { + MethodName: "MacVerify", + Handler: _KeyManagementService_MacVerify_Handler, + }, + { + MethodName: "GenerateRandomBytes", + Handler: _KeyManagementService_GenerateRandomBytes_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/kms/v1/service.proto", +} diff --git a/vendor/cloud.google.com/go/kms/apiv1/version.go b/vendor/cloud.google.com/go/kms/apiv1/version.go new file mode 100644 index 00000000000..03fc7531ca8 --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/version.go @@ -0,0 +1,23 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by gapicgen. DO NOT EDIT. + +package kms + +import "cloud.google.com/go/kms/internal" + +func init() { + versionClient = internal.Version +} diff --git a/vendor/cloud.google.com/go/kms/internal/version.go b/vendor/cloud.google.com/go/kms/internal/version.go new file mode 100644 index 00000000000..7c251d99455 --- /dev/null +++ b/vendor/cloud.google.com/go/kms/internal/version.go @@ -0,0 +1,18 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +// Version is the current tagged release of the library. +const Version = "1.7.0" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/auth/auth.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/auth/auth.go new file mode 100644 index 00000000000..1f183448209 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/auth/auth.go @@ -0,0 +1,65 @@ +package auth + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +import ( + "os" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/azure/auth" +) + +// NewAuthorizerFromEnvironment creates a keyvault dataplane Authorizer configured from environment variables in the order: +// 1. Client credentials +// 2. Client certificate +// 3. Username password +// 4. 
MSI +func NewAuthorizerFromEnvironment() (autorest.Authorizer, error) { + res, err := getResource() + if err != nil { + return nil, err + } + return auth.NewAuthorizerFromEnvironmentWithResource(res) +} + +// NewAuthorizerFromFile creates a keyvault dataplane Authorizer configured from a configuration file. +// The path to the configuration file must be specified in the AZURE_AUTH_LOCATION environment variable. +func NewAuthorizerFromFile() (autorest.Authorizer, error) { + res, err := getResource() + if err != nil { + return nil, err + } + return auth.NewAuthorizerFromFileWithResource(res) +} + +// NewAuthorizerFromCLI creates a keyvault dataplane Authorizer configured from Azure CLI 2.0 for local development scenarios. +func NewAuthorizerFromCLI() (autorest.Authorizer, error) { + res, err := getResource() + if err != nil { + return nil, err + } + return auth.NewAuthorizerFromCLIWithResource(res) +} + +func getResource() (string, error) { + var env azure.Environment + + if envName := os.Getenv("AZURE_ENVIRONMENT"); envName == "" { + env = azure.PublicCloud + } else { + var err error + env, err = azure.EnvironmentFromName(envName) + if err != nil { + return "", err + } + } + + resource := os.Getenv("AZURE_KEYVAULT_RESOURCE") + if resource == "" { + resource = env.ResourceIdentifiers.KeyVault + } + + return resource, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/CHANGELOG.md new file mode 100644 index 00000000000..6c701c1c40a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/CHANGELOG.md @@ -0,0 +1,26 @@ +# Change History + +## Additive Changes + +### New Funcs + +1. BackupCertificateResult.MarshalJSON() ([]byte, error) +1. BackupKeyResult.MarshalJSON() ([]byte, error) +1. BackupSecretResult.MarshalJSON() ([]byte, error) +1. BackupStorageResult.MarshalJSON() ([]byte, error) +1. CertificateIssuerListResult.MarshalJSON() ([]byte, error) +1. CertificateListResult.MarshalJSON() ([]byte, error) +1. DeletedCertificateListResult.MarshalJSON() ([]byte, error) +1. DeletedKeyListResult.MarshalJSON() ([]byte, error) +1. DeletedSasDefinitionListResult.MarshalJSON() ([]byte, error) +1. DeletedSecretListResult.MarshalJSON() ([]byte, error) +1. DeletedStorageListResult.MarshalJSON() ([]byte, error) +1. Error.MarshalJSON() ([]byte, error) +1. ErrorType.MarshalJSON() ([]byte, error) +1. KeyListResult.MarshalJSON() ([]byte, error) +1. KeyOperationResult.MarshalJSON() ([]byte, error) +1. KeyVerifyResult.MarshalJSON() ([]byte, error) +1. PendingCertificateSigningRequestResult.MarshalJSON() ([]byte, error) +1. SasDefinitionListResult.MarshalJSON() ([]byte, error) +1. SecretListResult.MarshalJSON() ([]byte, error) +1. StorageListResult.MarshalJSON() ([]byte, error) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/client.go new file mode 100644 index 00000000000..e101ccbd14f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/client.go @@ -0,0 +1,7313 @@ +// Package keyvault implements the Azure ARM Keyvault service API version 7.1. +// +// The key vault client performs cryptographic key operations and vault operations against the Key Vault service. +package keyvault + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// BaseClient is the base client for Keyvault. +type BaseClient struct { + autorest.Client +} + +// New creates an instance of the BaseClient client. +func New() BaseClient { + return NewWithoutDefaults() +} + +// NewWithoutDefaults creates an instance of the BaseClient client. +func NewWithoutDefaults() BaseClient { + return BaseClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + } +} + +// BackupCertificate requests that a backup of the specified certificate be downloaded to the client. All versions of +// the certificate will be downloaded. This operation requires the certificates/backup permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// certificateName - the name of the certificate. +func (client BaseClient) BackupCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result BackupCertificateResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.BackupCertificate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.BackupCertificatePreparer(ctx, vaultBaseURL, certificateName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupCertificate", nil, "Failure preparing request") + return + } + + resp, err := client.BackupCertificateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupCertificate", resp, "Failure sending request") + return + } + + result, err = client.BackupCertificateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupCertificate", resp, "Failure responding to request") + return + } + + return +} + +// BackupCertificatePreparer prepares the BackupCertificate request. +func (client BaseClient) BackupCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "certificate-name": autorest.Encode("path", certificateName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/certificates/{certificate-name}/backup", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// BackupCertificateSender sends the BackupCertificate request. The method will close the +// http.Response Body if it receives an error. 
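
The generated BaseClient embeds autorest.Client, so a caller only attaches an Authorizer and then invokes the operation methods directly. A minimal usage sketch, assuming the NewAuthorizerFromEnvironment helper from the keyvault/auth package vendored above; the vault URL and certificate name are placeholders, and the Value *string payload on BackupCertificateResult comes from the vendored models rather than this hunk, so treat it as an assumption:

    package main

    import (
        "context"
        "fmt"

        kvauth "github.com/Azure/azure-sdk-for-go/services/keyvault/auth"
        "github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault"
    )

    func main() {
        // Resolve an authorizer from the environment (client secret,
        // certificate, username/password, or MSI, per the auth package above).
        authorizer, err := kvauth.NewAuthorizerFromEnvironment()
        if err != nil {
            panic(err)
        }

        client := keyvault.New()
        client.Authorizer = authorizer // BaseClient embeds autorest.Client

        // Download a protected backup blob covering every version of the
        // certificate; vault URL and name are placeholders.
        result, err := client.BackupCertificate(context.Background(),
            "https://myvault.vault.azure.net", "my-cert")
        if err != nil {
            panic(err)
        }
        if result.Value != nil { // assumed *string field on the result model
            fmt.Println("backup blob length:", len(*result.Value))
        }
    }
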
+func (client BaseClient) BackupCertificateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// BackupCertificateResponder handles the response to the BackupCertificate request. The method always +// closes the http.Response Body. +func (client BaseClient) BackupCertificateResponder(resp *http.Response) (result BackupCertificateResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// BackupKey the Key Backup operation exports a key from Azure Key Vault in a protected form. Note that this operation +// does NOT return key material in a form that can be used outside the Azure Key Vault system, the returned key +// material is either protected to a Azure Key Vault HSM or to Azure Key Vault itself. The intent of this operation is +// to allow a client to GENERATE a key in one Azure Key Vault instance, BACKUP the key, and then RESTORE it into +// another Azure Key Vault instance. The BACKUP operation may be used to export, in protected form, any key type from +// Azure Key Vault. Individual versions of a key cannot be backed up. BACKUP / RESTORE can be performed within +// geographical boundaries only; meaning that a BACKUP from one geographical area cannot be restored to another +// geographical area. For example, a backup from the US geographical area cannot be restored in an EU geographical +// area. This operation requires the key/backup permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// keyName - the name of the key. +func (client BaseClient) BackupKey(ctx context.Context, vaultBaseURL string, keyName string) (result BackupKeyResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.BackupKey") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.BackupKeyPreparer(ctx, vaultBaseURL, keyName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupKey", nil, "Failure preparing request") + return + } + + resp, err := client.BackupKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupKey", resp, "Failure sending request") + return + } + + result, err = client.BackupKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupKey", resp, "Failure responding to request") + return + } + + return +} + +// BackupKeyPreparer prepares the BackupKey request. 
+func (client BaseClient) BackupKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "key-name": autorest.Encode("path", keyName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/keys/{key-name}/backup", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// BackupKeySender sends the BackupKey request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) BackupKeySender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// BackupKeyResponder handles the response to the BackupKey request. The method always +// closes the http.Response Body. +func (client BaseClient) BackupKeyResponder(resp *http.Response) (result BackupKeyResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// BackupSecret requests that a backup of the specified secret be downloaded to the client. All versions of the secret +// will be downloaded. This operation requires the secrets/backup permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// secretName - the name of the secret. +func (client BaseClient) BackupSecret(ctx context.Context, vaultBaseURL string, secretName string) (result BackupSecretResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.BackupSecret") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.BackupSecretPreparer(ctx, vaultBaseURL, secretName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupSecret", nil, "Failure preparing request") + return + } + + resp, err := client.BackupSecretSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupSecret", resp, "Failure sending request") + return + } + + result, err = client.BackupSecretResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupSecret", resp, "Failure responding to request") + return + } + + return +} + +// BackupSecretPreparer prepares the BackupSecret request. 
+func (client BaseClient) BackupSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "secret-name": autorest.Encode("path", secretName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/secrets/{secret-name}/backup", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// BackupSecretSender sends the BackupSecret request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) BackupSecretSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// BackupSecretResponder handles the response to the BackupSecret request. The method always +// closes the http.Response Body. +func (client BaseClient) BackupSecretResponder(resp *http.Response) (result BackupSecretResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// BackupStorageAccount requests that a backup of the specified storage account be downloaded to the client. This +// operation requires the storage/backup permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. +func (client BaseClient) BackupStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result BackupStorageResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.BackupStorageAccount") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.BackupStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupStorageAccount", nil, "Failure preparing request") + return + } + + resp, err := client.BackupStorageAccountSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupStorageAccount", resp, "Failure sending request") + return + } + + result, err = client.BackupStorageAccountResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupStorageAccount", resp, "Failure responding to request") + return + } + + return +} + +// BackupStorageAccountPreparer prepares the BackupStorageAccount request. 
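
Every operation in this file follows the same three-stage pipeline: a Preparer builds the *http.Request, a Sender runs it through the client's retry policy, and a Responder unmarshals the JSON payload and closes the body. The wrapper methods simply chain the three stages, so callers can recompose them, for example to inspect or mutate the raw request. A sketch equivalent to calling BackupSecret directly, using only functions defined in this file (the kvx package name is hypothetical):

    package kvx // hypothetical helper package

    import (
        "context"

        "github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault"
    )

    // backupSecretManually chains the generated stages by hand, exactly as
    // the BackupSecret wrapper does: Preparer -> Sender -> Responder.
    func backupSecretManually(ctx context.Context, client keyvault.BaseClient,
        vaultBaseURL, secretName string) (keyvault.BackupSecretResult, error) {

        req, err := client.BackupSecretPreparer(ctx, vaultBaseURL, secretName)
        if err != nil {
            return keyvault.BackupSecretResult{}, err
        }
        resp, err := client.BackupSecretSender(req)
        if err != nil {
            return keyvault.BackupSecretResult{}, err
        }
        // The Responder unmarshals the result and closes resp.Body.
        return client.BackupSecretResponder(resp)
    }
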
+func (client BaseClient) BackupStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/storage/{storage-account-name}/backup", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// BackupStorageAccountSender sends the BackupStorageAccount request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) BackupStorageAccountSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// BackupStorageAccountResponder handles the response to the BackupStorageAccount request. The method always +// closes the http.Response Body. +func (client BaseClient) BackupStorageAccountResponder(resp *http.Response) (result BackupStorageResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateCertificate if this is the first version, the certificate resource is created. This operation requires the +// certificates/create permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// certificateName - the name of the certificate. +// parameters - the parameters to create a certificate. 
+func (client BaseClient) CreateCertificate(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateCreateParameters) (result CertificateOperation, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.CreateCertificate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: certificateName, + Constraints: []validation.Constraint{{Target: "certificateName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.CertificatePolicy", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil}}}, + }}, + }}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "CreateCertificate", err.Error()) + } + + req, err := client.CreateCertificatePreparer(ctx, vaultBaseURL, certificateName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateCertificate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateCertificateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateCertificate", resp, "Failure sending request") + return + } + + result, err = client.CreateCertificateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateCertificate", resp, "Failure responding to request") + return + } + + return +} + +// CreateCertificatePreparer prepares the CreateCertificate request. +func (client BaseClient) CreateCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateCreateParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "certificate-name": autorest.Encode("path", certificateName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/certificates/{certificate-name}/create", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateCertificateSender sends the CreateCertificate request. The method will close the +// http.Response Body if it receives an error. 
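
Note the validation.Validate call at the top of CreateCertificate: the certificate name and policy constraints are enforced client-side, so a malformed name fails before any request is even prepared. A small sketch of that behavior; no Authorizer is needed because the call never reaches the wire:

    package main

    import (
        "context"
        "fmt"

        "github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault"
    )

    func main() {
        client := keyvault.New()

        // "bad_name!" violates the ^[0-9a-zA-Z-]+$ pattern on certificateName,
        // so validation.Validate rejects the call before a request is built;
        // the empty parameters struct passes the optional policy constraints.
        _, err := client.CreateCertificate(context.Background(),
            "https://myvault.vault.azure.net", "bad_name!",
            keyvault.CertificateCreateParameters{})
        fmt.Println(err) // a keyvault.BaseClient "CreateCertificate" validation error
    }
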
+func (client BaseClient) CreateCertificateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// CreateCertificateResponder handles the response to the CreateCertificate request. The method always +// closes the http.Response Body. +func (client BaseClient) CreateCertificateResponder(resp *http.Response) (result CertificateOperation, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateKey the create key operation can be used to create any key type in Azure Key Vault. If the named key already +// exists, Azure Key Vault creates a new version of the key. It requires the keys/create permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// keyName - the name for the new key. The system will generate the version name for the new key. +// parameters - the parameters to create a key. +func (client BaseClient) CreateKey(ctx context.Context, vaultBaseURL string, keyName string, parameters KeyCreateParameters) (result KeyBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.CreateKey") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: keyName, + Constraints: []validation.Constraint{{Target: "keyName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "CreateKey", err.Error()) + } + + req, err := client.CreateKeyPreparer(ctx, vaultBaseURL, keyName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateKey", nil, "Failure preparing request") + return + } + + resp, err := client.CreateKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateKey", resp, "Failure sending request") + return + } + + result, err = client.CreateKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateKey", resp, "Failure responding to request") + return + } + + return +} + +// CreateKeyPreparer prepares the CreateKey request. +func (client BaseClient) CreateKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, parameters KeyCreateParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "key-name": autorest.Encode("path", keyName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/keys/{key-name}/create", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateKeySender sends the CreateKey request. 
The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) CreateKeySender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// CreateKeyResponder handles the response to the CreateKey request. The method always +// closes the http.Response Body. +func (client BaseClient) CreateKeyResponder(resp *http.Response) (result KeyBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Decrypt the DECRYPT operation decrypts a well-formed block of ciphertext using the target encryption key and +// specified algorithm. This operation is the reverse of the ENCRYPT operation; only a single block of data may be +// decrypted, the size of this block is dependent on the target key and the algorithm to be used. The DECRYPT operation +// applies to asymmetric and symmetric keys stored in Azure Key Vault since it uses the private portion of the key. +// This operation requires the keys/decrypt permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// keyName - the name of the key. +// keyVersion - the version of the key. +// parameters - the parameters for the decryption operation. +func (client BaseClient) Decrypt(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (result KeyOperationResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.Decrypt") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "Decrypt", err.Error()) + } + + req, err := client.DecryptPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Decrypt", nil, "Failure preparing request") + return + } + + resp, err := client.DecryptSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Decrypt", resp, "Failure sending request") + return + } + + result, err = client.DecryptResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Decrypt", resp, "Failure responding to request") + return + } + + return +} + +// DecryptPreparer prepares the Decrypt request. 
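+//
+// Illustrative sketch (not generator output): callers normally invoke Decrypt
+// directly, with a configured client and ctx/vaultBaseURL in scope. The
+// validation above makes parameters.Value mandatory; Value carries the
+// base64url-encoded ciphertext, and RSAOAEP256 is assumed to be one of the
+// JSONWebKeyEncryptionAlgorithm constants in enums.go.
+//
+//	res, err := client.Decrypt(ctx, vaultBaseURL, "mykey", keyVersion,
+//		keyvault.KeyOperationsParameters{
+//			Algorithm: keyvault.RSAOAEP256,
+//			Value:     to.StringPtr(ciphertextB64),
+//		})
+//	// on success, res.Result holds the base64url-encoded plaintext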
+func (client BaseClient) DecryptPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "key-name": autorest.Encode("path", keyName), + "key-version": autorest.Encode("path", keyVersion), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/keys/{key-name}/{key-version}/decrypt", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DecryptSender sends the Decrypt request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) DecryptSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DecryptResponder handles the response to the Decrypt request. The method always +// closes the http.Response Body. +func (client BaseClient) DecryptResponder(resp *http.Response) (result KeyOperationResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteCertificate deletes all versions of a certificate object along with its associated policy. Delete certificate +// cannot be used to remove individual versions of a certificate object. This operation requires the +// certificates/delete permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// certificateName - the name of the certificate. +func (client BaseClient) DeleteCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result DeletedCertificateBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteCertificate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeleteCertificatePreparer(ctx, vaultBaseURL, certificateName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificate", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteCertificateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificate", resp, "Failure sending request") + return + } + + result, err = client.DeleteCertificateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificate", resp, "Failure responding to request") + return + } + + return +} + +// DeleteCertificatePreparer prepares the DeleteCertificate request. 
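+//
+// Illustrative sketch (not generator output): a single DeleteCertificate call
+// removes every version of the certificate, per the doc comment above. The
+// RecoveryID field is assumed per models.go and is only populated on
+// soft-delete enabled vaults.
+//
+//	deleted, err := client.DeleteCertificate(ctx, vaultBaseURL, "mycert")
+//	if err == nil && deleted.RecoveryID != nil {
+//		// the certificate can still be recovered via *deleted.RecoveryID
+//	}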
+func (client BaseClient) DeleteCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "certificate-name": autorest.Encode("path", certificateName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/certificates/{certificate-name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteCertificateSender sends the DeleteCertificate request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) DeleteCertificateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DeleteCertificateResponder handles the response to the DeleteCertificate request. The method always +// closes the http.Response Body. +func (client BaseClient) DeleteCertificateResponder(resp *http.Response) (result DeletedCertificateBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteCertificateContacts deletes the certificate contacts for a specified key vault certificate. This operation +// requires the certificates/managecontacts permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +func (client BaseClient) DeleteCertificateContacts(ctx context.Context, vaultBaseURL string) (result Contacts, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteCertificateContacts") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeleteCertificateContactsPreparer(ctx, vaultBaseURL) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateContacts", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteCertificateContactsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateContacts", resp, "Failure sending request") + return + } + + result, err = client.DeleteCertificateContactsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateContacts", resp, "Failure responding to request") + return + } + + return +} + +// DeleteCertificateContactsPreparer prepares the DeleteCertificateContacts request. 
+func (client BaseClient) DeleteCertificateContactsPreparer(ctx context.Context, vaultBaseURL string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPath("/certificates/contacts"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteCertificateContactsSender sends the DeleteCertificateContacts request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) DeleteCertificateContactsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DeleteCertificateContactsResponder handles the response to the DeleteCertificateContacts request. The method always +// closes the http.Response Body. +func (client BaseClient) DeleteCertificateContactsResponder(resp *http.Response) (result Contacts, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteCertificateIssuer the DeleteCertificateIssuer operation permanently removes the specified certificate issuer +// from the vault. This operation requires the certificates/manageissuers/deleteissuers permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// issuerName - the name of the issuer. +func (client BaseClient) DeleteCertificateIssuer(ctx context.Context, vaultBaseURL string, issuerName string) (result IssuerBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteCertificateIssuer") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeleteCertificateIssuerPreparer(ctx, vaultBaseURL, issuerName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateIssuer", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteCertificateIssuerSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateIssuer", resp, "Failure sending request") + return + } + + result, err = client.DeleteCertificateIssuerResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateIssuer", resp, "Failure responding to request") + return + } + + return +} + +// DeleteCertificateIssuerPreparer prepares the DeleteCertificateIssuer request. 
+func (client BaseClient) DeleteCertificateIssuerPreparer(ctx context.Context, vaultBaseURL string, issuerName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "issuer-name": autorest.Encode("path", issuerName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/certificates/issuers/{issuer-name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteCertificateIssuerSender sends the DeleteCertificateIssuer request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) DeleteCertificateIssuerSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DeleteCertificateIssuerResponder handles the response to the DeleteCertificateIssuer request. The method always +// closes the http.Response Body. +func (client BaseClient) DeleteCertificateIssuerResponder(resp *http.Response) (result IssuerBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteCertificateOperation deletes the creation operation for a specified certificate that is in the process of +// being created. The certificate is no longer created. This operation requires the certificates/update permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// certificateName - the name of the certificate. +func (client BaseClient) DeleteCertificateOperation(ctx context.Context, vaultBaseURL string, certificateName string) (result CertificateOperation, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteCertificateOperation") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeleteCertificateOperationPreparer(ctx, vaultBaseURL, certificateName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateOperation", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteCertificateOperationSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateOperation", resp, "Failure sending request") + return + } + + result, err = client.DeleteCertificateOperationResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateOperation", resp, "Failure responding to request") + return + } + + return +} + +// DeleteCertificateOperationPreparer prepares the DeleteCertificateOperation request. 
+func (client BaseClient) DeleteCertificateOperationPreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "certificate-name": autorest.Encode("path", certificateName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/certificates/{certificate-name}/pending", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteCertificateOperationSender sends the DeleteCertificateOperation request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) DeleteCertificateOperationSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DeleteCertificateOperationResponder handles the response to the DeleteCertificateOperation request. The method always +// closes the http.Response Body. +func (client BaseClient) DeleteCertificateOperationResponder(resp *http.Response) (result CertificateOperation, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteKey the delete key operation cannot be used to remove individual versions of a key. This operation removes the +// cryptographic material associated with the key, which means the key is not usable for Sign/Verify, Wrap/Unwrap or +// Encrypt/Decrypt operations. This operation requires the keys/delete permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// keyName - the name of the key to delete. +func (client BaseClient) DeleteKey(ctx context.Context, vaultBaseURL string, keyName string) (result DeletedKeyBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteKey") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeleteKeyPreparer(ctx, vaultBaseURL, keyName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteKey", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteKey", resp, "Failure sending request") + return + } + + result, err = client.DeleteKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteKey", resp, "Failure responding to request") + return + } + + return +} + +// DeleteKeyPreparer prepares the DeleteKey request. 
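+//
+// Illustrative sketch (not generator output): DeleteKey is a direct call; as
+// documented above, the deleted key can no longer be used for Sign/Verify,
+// Wrap/Unwrap, or Encrypt/Decrypt.
+//
+//	deletedKey, err := client.DeleteKey(ctx, vaultBaseURL, "mykey")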
+func (client BaseClient) DeleteKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "key-name": autorest.Encode("path", keyName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/keys/{key-name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteKeySender sends the DeleteKey request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) DeleteKeySender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DeleteKeyResponder handles the response to the DeleteKey request. The method always +// closes the http.Response Body. +func (client BaseClient) DeleteKeyResponder(resp *http.Response) (result DeletedKeyBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteSasDefinition deletes a SAS definition from a specified storage account. This operation requires the +// storage/deletesas permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. +// sasDefinitionName - the name of the SAS definition. 
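+//
+// Illustrative sketch (not generator output): both names must match
+// ^[0-9a-zA-Z]+$, otherwise the client-side validation below fails the call
+// before any request is sent.
+//
+//	_, err := client.DeleteSasDefinition(ctx, vaultBaseURL, "storage1", "sasdef1")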
+func (client BaseClient) DeleteSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (result DeletedSasDefinitionBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteSasDefinition") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: storageAccountName, + Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, + {TargetValue: sasDefinitionName, + Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "DeleteSasDefinition", err.Error()) + } + + req, err := client.DeleteSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSasDefinition", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSasDefinitionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSasDefinition", resp, "Failure sending request") + return + } + + result, err = client.DeleteSasDefinitionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSasDefinition", resp, "Failure responding to request") + return + } + + return +} + +// DeleteSasDefinitionPreparer prepares the DeleteSasDefinition request. +func (client BaseClient) DeleteSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "sas-definition-name": autorest.Encode("path", sasDefinitionName), + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/storage/{storage-account-name}/sas/{sas-definition-name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSasDefinitionSender sends the DeleteSasDefinition request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) DeleteSasDefinitionSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DeleteSasDefinitionResponder handles the response to the DeleteSasDefinition request. The method always +// closes the http.Response Body. 
+func (client BaseClient) DeleteSasDefinitionResponder(resp *http.Response) (result DeletedSasDefinitionBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteSecret the DELETE operation applies to any secret stored in Azure Key Vault. DELETE cannot be applied to an +// individual version of a secret. This operation requires the secrets/delete permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// secretName - the name of the secret. +func (client BaseClient) DeleteSecret(ctx context.Context, vaultBaseURL string, secretName string) (result DeletedSecretBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteSecret") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeleteSecretPreparer(ctx, vaultBaseURL, secretName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSecret", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSecretSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSecret", resp, "Failure sending request") + return + } + + result, err = client.DeleteSecretResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSecret", resp, "Failure responding to request") + return + } + + return +} + +// DeleteSecretPreparer prepares the DeleteSecret request. +func (client BaseClient) DeleteSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "secret-name": autorest.Encode("path", secretName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/secrets/{secret-name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSecretSender sends the DeleteSecret request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) DeleteSecretSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DeleteSecretResponder handles the response to the DeleteSecret request. The method always +// closes the http.Response Body. +func (client BaseClient) DeleteSecretResponder(resp *http.Response) (result DeletedSecretBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteStorageAccount deletes a storage account. This operation requires the storage/delete permission. 
+// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. +func (client BaseClient) DeleteStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result DeletedStorageBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteStorageAccount") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: storageAccountName, + Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "DeleteStorageAccount", err.Error()) + } + + req, err := client.DeleteStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteStorageAccount", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteStorageAccountSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteStorageAccount", resp, "Failure sending request") + return + } + + result, err = client.DeleteStorageAccountResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteStorageAccount", resp, "Failure responding to request") + return + } + + return +} + +// DeleteStorageAccountPreparer prepares the DeleteStorageAccount request. +func (client BaseClient) DeleteStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/storage/{storage-account-name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteStorageAccountSender sends the DeleteStorageAccount request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) DeleteStorageAccountSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DeleteStorageAccountResponder handles the response to the DeleteStorageAccount request. The method always +// closes the http.Response Body. +func (client BaseClient) DeleteStorageAccountResponder(resp *http.Response) (result DeletedStorageBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Encrypt the ENCRYPT operation encrypts an arbitrary sequence of bytes using an encryption key that is stored in +// Azure Key Vault. 
Note that the ENCRYPT operation only supports a single block of data, the size of which is +// dependent on the target key and the encryption algorithm to be used. The ENCRYPT operation is only strictly +// necessary for symmetric keys stored in Azure Key Vault since protection with an asymmetric key can be performed +// using public portion of the key. This operation is supported for asymmetric keys as a convenience for callers that +// have a key-reference but do not have access to the public key material. This operation requires the keys/encrypt +// permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// keyName - the name of the key. +// keyVersion - the version of the key. +// parameters - the parameters for the encryption operation. +func (client BaseClient) Encrypt(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (result KeyOperationResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.Encrypt") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "Encrypt", err.Error()) + } + + req, err := client.EncryptPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Encrypt", nil, "Failure preparing request") + return + } + + resp, err := client.EncryptSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Encrypt", resp, "Failure sending request") + return + } + + result, err = client.EncryptResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Encrypt", resp, "Failure responding to request") + return + } + + return +} + +// EncryptPreparer prepares the Encrypt request. +func (client BaseClient) EncryptPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "key-name": autorest.Encode("path", keyName), + "key-version": autorest.Encode("path", keyVersion), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/keys/{key-name}/{key-version}/encrypt", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// EncryptSender sends the Encrypt request. The method will close the +// http.Response Body if it receives an error. 
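+//
+// Illustrative sketch (not generator output): Encrypt mirrors Decrypt; the
+// mandatory Value carries a single base64url-encoded plaintext block, sized
+// per the doc comment above, and the algorithm constant is assumed from
+// enums.go.
+//
+//	res, err := client.Encrypt(ctx, vaultBaseURL, "mykey", keyVersion,
+//		keyvault.KeyOperationsParameters{
+//			Algorithm: keyvault.RSAOAEP256,
+//			Value:     to.StringPtr(plaintextB64),
+//		})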
+func (client BaseClient) EncryptSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// EncryptResponder handles the response to the Encrypt request. The method always +// closes the http.Response Body. +func (client BaseClient) EncryptResponder(resp *http.Response) (result KeyOperationResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetCertificate gets information about a specific certificate. This operation requires the certificates/get +// permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// certificateName - the name of the certificate in the given vault. +// certificateVersion - the version of the certificate. This URI fragment is optional. If not specified, the +// latest version of the certificate is returned. +func (client BaseClient) GetCertificate(ctx context.Context, vaultBaseURL string, certificateName string, certificateVersion string) (result CertificateBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetCertificatePreparer(ctx, vaultBaseURL, certificateName, certificateVersion) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificate", nil, "Failure preparing request") + return + } + + resp, err := client.GetCertificateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificate", resp, "Failure sending request") + return + } + + result, err = client.GetCertificateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificate", resp, "Failure responding to request") + return + } + + return +} + +// GetCertificatePreparer prepares the GetCertificate request. +func (client BaseClient) GetCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, certificateVersion string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "certificate-name": autorest.Encode("path", certificateName), + "certificate-version": autorest.Encode("path", certificateVersion), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/certificates/{certificate-name}/{certificate-version}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetCertificateSender sends the GetCertificate request. The method will close the +// http.Response Body if it receives an error. 
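+//
+// Illustrative sketch (not generator output): per the doc comment above, an
+// empty certificateVersion selects the latest version of the certificate.
+//
+//	bundle, err := client.GetCertificate(ctx, vaultBaseURL, "mycert", "")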
+func (client BaseClient) GetCertificateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetCertificateResponder handles the response to the GetCertificate request. The method always +// closes the http.Response Body. +func (client BaseClient) GetCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetCertificateContacts the GetCertificateContacts operation returns the set of certificate contact resources in the +// specified key vault. This operation requires the certificates/managecontacts permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +func (client BaseClient) GetCertificateContacts(ctx context.Context, vaultBaseURL string) (result Contacts, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateContacts") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetCertificateContactsPreparer(ctx, vaultBaseURL) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateContacts", nil, "Failure preparing request") + return + } + + resp, err := client.GetCertificateContactsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateContacts", resp, "Failure sending request") + return + } + + result, err = client.GetCertificateContactsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateContacts", resp, "Failure responding to request") + return + } + + return +} + +// GetCertificateContactsPreparer prepares the GetCertificateContacts request. +func (client BaseClient) GetCertificateContactsPreparer(ctx context.Context, vaultBaseURL string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPath("/certificates/contacts"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetCertificateContactsSender sends the GetCertificateContacts request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetCertificateContactsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetCertificateContactsResponder handles the response to the GetCertificateContacts request. The method always +// closes the http.Response Body. 
+func (client BaseClient) GetCertificateContactsResponder(resp *http.Response) (result Contacts, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetCertificateIssuer the GetCertificateIssuer operation returns the specified certificate issuer resources in the +// specified key vault. This operation requires the certificates/manageissuers/getissuers permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// issuerName - the name of the issuer. +func (client BaseClient) GetCertificateIssuer(ctx context.Context, vaultBaseURL string, issuerName string) (result IssuerBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateIssuer") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetCertificateIssuerPreparer(ctx, vaultBaseURL, issuerName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuer", nil, "Failure preparing request") + return + } + + resp, err := client.GetCertificateIssuerSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuer", resp, "Failure sending request") + return + } + + result, err = client.GetCertificateIssuerResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuer", resp, "Failure responding to request") + return + } + + return +} + +// GetCertificateIssuerPreparer prepares the GetCertificateIssuer request. +func (client BaseClient) GetCertificateIssuerPreparer(ctx context.Context, vaultBaseURL string, issuerName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "issuer-name": autorest.Encode("path", issuerName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/certificates/issuers/{issuer-name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetCertificateIssuerSender sends the GetCertificateIssuer request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetCertificateIssuerSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetCertificateIssuerResponder handles the response to the GetCertificateIssuer request. The method always +// closes the http.Response Body. 
+func (client BaseClient) GetCertificateIssuerResponder(resp *http.Response) (result IssuerBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetCertificateIssuers the GetCertificateIssuers operation returns the set of certificate issuer resources in the +// specified key vault. This operation requires the certificates/manageissuers/getissuers permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// maxresults - maximum number of results to return in a page. If not specified the service will return up to +// 25 results. +func (client BaseClient) GetCertificateIssuers(ctx context.Context, vaultBaseURL string, maxresults *int32) (result CertificateIssuerListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateIssuers") + defer func() { + sc := -1 + if result.cilr.Response.Response != nil { + sc = result.cilr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxresults, + Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "GetCertificateIssuers", err.Error()) + } + + result.fn = client.getCertificateIssuersNextResults + req, err := client.GetCertificateIssuersPreparer(ctx, vaultBaseURL, maxresults) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuers", nil, "Failure preparing request") + return + } + + resp, err := client.GetCertificateIssuersSender(req) + if err != nil { + result.cilr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuers", resp, "Failure sending request") + return + } + + result.cilr, err = client.GetCertificateIssuersResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuers", resp, "Failure responding to request") + return + } + if result.cilr.hasNextLink() && result.cilr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// GetCertificateIssuersPreparer prepares the GetCertificateIssuers request. +func (client BaseClient) GetCertificateIssuersPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if maxresults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxresults) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPath("/certificates/issuers"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetCertificateIssuersSender sends the GetCertificateIssuers request. 
The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetCertificateIssuersSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetCertificateIssuersResponder handles the response to the GetCertificateIssuers request. The method always +// closes the http.Response Body. +func (client BaseClient) GetCertificateIssuersResponder(resp *http.Response) (result CertificateIssuerListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// getCertificateIssuersNextResults retrieves the next set of results, if any. +func (client BaseClient) getCertificateIssuersNextResults(ctx context.Context, lastResults CertificateIssuerListResult) (result CertificateIssuerListResult, err error) { + req, err := lastResults.certificateIssuerListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateIssuersNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.GetCertificateIssuersSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateIssuersNextResults", resp, "Failure sending next results request") + } + result, err = client.GetCertificateIssuersResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateIssuersNextResults", resp, "Failure responding to next results request") + } + return +} + +// GetCertificateIssuersComplete enumerates all values, automatically crossing page boundaries as required. +func (client BaseClient) GetCertificateIssuersComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result CertificateIssuerListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateIssuers") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.GetCertificateIssuers(ctx, vaultBaseURL, maxresults) + return +} + +// GetCertificateOperation gets the creation operation associated with a specified certificate. This operation requires +// the certificates/get permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// certificateName - the name of the certificate. 
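+//
+// Illustrative sketch (not generator output): callers typically poll the
+// pending operation until it completes; the Status field and its
+// "inProgress" value are assumed per models.go. time is the standard
+// library package.
+//
+//	op, err := client.GetCertificateOperation(ctx, vaultBaseURL, "mycert")
+//	for err == nil && op.Status != nil && *op.Status == "inProgress" {
+//		time.Sleep(5 * time.Second)
+//		op, err = client.GetCertificateOperation(ctx, vaultBaseURL, "mycert")
+//	}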
+func (client BaseClient) GetCertificateOperation(ctx context.Context, vaultBaseURL string, certificateName string) (result CertificateOperation, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateOperation") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetCertificateOperationPreparer(ctx, vaultBaseURL, certificateName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateOperation", nil, "Failure preparing request") + return + } + + resp, err := client.GetCertificateOperationSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateOperation", resp, "Failure sending request") + return + } + + result, err = client.GetCertificateOperationResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateOperation", resp, "Failure responding to request") + return + } + + return +} + +// GetCertificateOperationPreparer prepares the GetCertificateOperation request. +func (client BaseClient) GetCertificateOperationPreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "certificate-name": autorest.Encode("path", certificateName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/certificates/{certificate-name}/pending", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetCertificateOperationSender sends the GetCertificateOperation request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetCertificateOperationSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetCertificateOperationResponder handles the response to the GetCertificateOperation request. The method always +// closes the http.Response Body. +func (client BaseClient) GetCertificateOperationResponder(resp *http.Response) (result CertificateOperation, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetCertificatePolicy the GetCertificatePolicy operation returns the specified certificate policy resources in the +// specified key vault. This operation requires the certificates/get permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// certificateName - the name of the certificate in a given key vault. 
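+//
+// Illustrative sketch (not generator output): a direct read of the policy;
+// the X509CertificateProperties field is assumed per models.go.
+//
+//	policy, err := client.GetCertificatePolicy(ctx, vaultBaseURL, "mycert")
+//	_ = policy.X509CertificateProperties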
+func (client BaseClient) GetCertificatePolicy(ctx context.Context, vaultBaseURL string, certificateName string) (result CertificatePolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificatePolicy") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetCertificatePolicyPreparer(ctx, vaultBaseURL, certificateName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificatePolicy", nil, "Failure preparing request") + return + } + + resp, err := client.GetCertificatePolicySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificatePolicy", resp, "Failure sending request") + return + } + + result, err = client.GetCertificatePolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificatePolicy", resp, "Failure responding to request") + return + } + + return +} + +// GetCertificatePolicyPreparer prepares the GetCertificatePolicy request. +func (client BaseClient) GetCertificatePolicyPreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "certificate-name": autorest.Encode("path", certificateName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/certificates/{certificate-name}/policy", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetCertificatePolicySender sends the GetCertificatePolicy request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetCertificatePolicySender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetCertificatePolicyResponder handles the response to the GetCertificatePolicy request. The method always +// closes the http.Response Body. +func (client BaseClient) GetCertificatePolicyResponder(resp *http.Response) (result CertificatePolicy, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetCertificates the GetCertificates operation returns the set of certificates resources in the specified key vault. +// This operation requires the certificates/list permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// maxresults - maximum number of results to return in a page. If not specified the service will return up to +// 25 results. +// includePending - specifies whether to include certificates which are not completely provisioned. 
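+//
+// Illustrative sketch (not generator output): for full enumeration prefer
+// GetCertificatesComplete below, whose iterator crosses page boundaries
+// automatically (NotDone/Value/NextWithContext are the standard
+// autorest-generated iterator methods). When set, maxresults must be between
+// 1 and 25 per the validation in GetCertificates.
+//
+//	iter, err := client.GetCertificatesComplete(ctx, vaultBaseURL, to.Int32Ptr(25), to.BoolPtr(false))
+//	for err == nil && iter.NotDone() {
+//		item := iter.Value() // CertificateItem
+//		_ = item.ID
+//		err = iter.NextWithContext(ctx)
+//	}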
+func (client BaseClient) GetCertificates(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (result CertificateListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificates") + defer func() { + sc := -1 + if result.clr.Response.Response != nil { + sc = result.clr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxresults, + Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "GetCertificates", err.Error()) + } + + result.fn = client.getCertificatesNextResults + req, err := client.GetCertificatesPreparer(ctx, vaultBaseURL, maxresults, includePending) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificates", nil, "Failure preparing request") + return + } + + resp, err := client.GetCertificatesSender(req) + if err != nil { + result.clr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificates", resp, "Failure sending request") + return + } + + result.clr, err = client.GetCertificatesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificates", resp, "Failure responding to request") + return + } + if result.clr.hasNextLink() && result.clr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// GetCertificatesPreparer prepares the GetCertificates request. +func (client BaseClient) GetCertificatesPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if maxresults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxresults) + } + if includePending != nil { + queryParameters["includePending"] = autorest.Encode("query", *includePending) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPath("/certificates"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetCertificatesSender sends the GetCertificates request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetCertificatesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetCertificatesResponder handles the response to the GetCertificates request. The method always +// closes the http.Response Body. 
+func (client BaseClient) GetCertificatesResponder(resp *http.Response) (result CertificateListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// getCertificatesNextResults retrieves the next set of results, if any. +func (client BaseClient) getCertificatesNextResults(ctx context.Context, lastResults CertificateListResult) (result CertificateListResult, err error) { + req, err := lastResults.certificateListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificatesNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.GetCertificatesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificatesNextResults", resp, "Failure sending next results request") + } + result, err = client.GetCertificatesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificatesNextResults", resp, "Failure responding to next results request") + } + return +} + +// GetCertificatesComplete enumerates all values, automatically crossing page boundaries as required. +func (client BaseClient) GetCertificatesComplete(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (result CertificateListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificates") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.GetCertificates(ctx, vaultBaseURL, maxresults, includePending) + return +} + +// GetCertificateVersions the GetCertificateVersions operation returns the versions of a certificate in the specified +// key vault. This operation requires the certificates/list permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// certificateName - the name of the certificate. +// maxresults - maximum number of results to return in a page. If not specified the service will return up to +// 25 results. 
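+//
+// Illustrative sketch (not generated code): enumerating every version of a
+// hypothetical certificate via the iterator variant defined below, assuming
+// client and ctx as above and the standard autorest iterator methods:
+//
+//	iter, err := client.GetCertificateVersionsComplete(ctx, "https://myvault.vault.azure.net", "my-cert", nil)
+//	for err == nil && iter.NotDone() {
+//		_ = iter.Value() // one CertificateItem per version
+//		err = iter.NextWithContext(ctx)
+//	}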
+func (client BaseClient) GetCertificateVersions(ctx context.Context, vaultBaseURL string, certificateName string, maxresults *int32) (result CertificateListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateVersions") + defer func() { + sc := -1 + if result.clr.Response.Response != nil { + sc = result.clr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxresults, + Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "GetCertificateVersions", err.Error()) + } + + result.fn = client.getCertificateVersionsNextResults + req, err := client.GetCertificateVersionsPreparer(ctx, vaultBaseURL, certificateName, maxresults) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateVersions", nil, "Failure preparing request") + return + } + + resp, err := client.GetCertificateVersionsSender(req) + if err != nil { + result.clr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateVersions", resp, "Failure sending request") + return + } + + result.clr, err = client.GetCertificateVersionsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateVersions", resp, "Failure responding to request") + return + } + if result.clr.hasNextLink() && result.clr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// GetCertificateVersionsPreparer prepares the GetCertificateVersions request. +func (client BaseClient) GetCertificateVersionsPreparer(ctx context.Context, vaultBaseURL string, certificateName string, maxresults *int32) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "certificate-name": autorest.Encode("path", certificateName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if maxresults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxresults) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/certificates/{certificate-name}/versions", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetCertificateVersionsSender sends the GetCertificateVersions request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetCertificateVersionsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetCertificateVersionsResponder handles the response to the GetCertificateVersions request. The method always +// closes the http.Response Body. 
+func (client BaseClient) GetCertificateVersionsResponder(resp *http.Response) (result CertificateListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// getCertificateVersionsNextResults retrieves the next set of results, if any. +func (client BaseClient) getCertificateVersionsNextResults(ctx context.Context, lastResults CertificateListResult) (result CertificateListResult, err error) { + req, err := lastResults.certificateListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateVersionsNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.GetCertificateVersionsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateVersionsNextResults", resp, "Failure sending next results request") + } + result, err = client.GetCertificateVersionsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateVersionsNextResults", resp, "Failure responding to next results request") + } + return +} + +// GetCertificateVersionsComplete enumerates all values, automatically crossing page boundaries as required. +func (client BaseClient) GetCertificateVersionsComplete(ctx context.Context, vaultBaseURL string, certificateName string, maxresults *int32) (result CertificateListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateVersions") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.GetCertificateVersions(ctx, vaultBaseURL, certificateName, maxresults) + return +} + +// GetDeletedCertificate the GetDeletedCertificate operation retrieves the deleted certificate information plus its +// attributes, such as retention interval, scheduled permanent deletion and the current deletion recovery level. This +// operation requires the certificates/get permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. 
+// certificateName - the name of the certificate +func (client BaseClient) GetDeletedCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result DeletedCertificateBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedCertificate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetDeletedCertificatePreparer(ctx, vaultBaseURL, certificateName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificate", nil, "Failure preparing request") + return + } + + resp, err := client.GetDeletedCertificateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificate", resp, "Failure sending request") + return + } + + result, err = client.GetDeletedCertificateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificate", resp, "Failure responding to request") + return + } + + return +} + +// GetDeletedCertificatePreparer prepares the GetDeletedCertificate request. +func (client BaseClient) GetDeletedCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "certificate-name": autorest.Encode("path", certificateName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/deletedcertificates/{certificate-name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetDeletedCertificateSender sends the GetDeletedCertificate request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetDeletedCertificateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetDeletedCertificateResponder handles the response to the GetDeletedCertificate request. The method always +// closes the http.Response Body. +func (client BaseClient) GetDeletedCertificateResponder(resp *http.Response) (result DeletedCertificateBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetDeletedCertificates the GetDeletedCertificates operation retrieves the certificates in the current vault which +// are in a deleted state and ready for recovery or purging. This operation includes deletion-specific information. +// This operation requires the certificates/get/list permission. This operation can only be enabled on soft-delete +// enabled vaults. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// maxresults - maximum number of results to return in a page. If not specified the service will return up to +// 25 results. 
+// includePending - specifies whether to include certificates which are not completely provisioned. +func (client BaseClient) GetDeletedCertificates(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (result DeletedCertificateListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedCertificates") + defer func() { + sc := -1 + if result.dclr.Response.Response != nil { + sc = result.dclr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxresults, + Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "GetDeletedCertificates", err.Error()) + } + + result.fn = client.getDeletedCertificatesNextResults + req, err := client.GetDeletedCertificatesPreparer(ctx, vaultBaseURL, maxresults, includePending) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificates", nil, "Failure preparing request") + return + } + + resp, err := client.GetDeletedCertificatesSender(req) + if err != nil { + result.dclr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificates", resp, "Failure sending request") + return + } + + result.dclr, err = client.GetDeletedCertificatesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificates", resp, "Failure responding to request") + return + } + if result.dclr.hasNextLink() && result.dclr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// GetDeletedCertificatesPreparer prepares the GetDeletedCertificates request. +func (client BaseClient) GetDeletedCertificatesPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if maxresults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxresults) + } + if includePending != nil { + queryParameters["includePending"] = autorest.Encode("query", *includePending) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPath("/deletedcertificates"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetDeletedCertificatesSender sends the GetDeletedCertificates request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetDeletedCertificatesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetDeletedCertificatesResponder handles the response to the GetDeletedCertificates request. The method always +// closes the http.Response Body. 
+func (client BaseClient) GetDeletedCertificatesResponder(resp *http.Response) (result DeletedCertificateListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// getDeletedCertificatesNextResults retrieves the next set of results, if any. +func (client BaseClient) getDeletedCertificatesNextResults(ctx context.Context, lastResults DeletedCertificateListResult) (result DeletedCertificateListResult, err error) { + req, err := lastResults.deletedCertificateListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedCertificatesNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.GetDeletedCertificatesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedCertificatesNextResults", resp, "Failure sending next results request") + } + result, err = client.GetDeletedCertificatesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedCertificatesNextResults", resp, "Failure responding to next results request") + } + return +} + +// GetDeletedCertificatesComplete enumerates all values, automatically crossing page boundaries as required. +func (client BaseClient) GetDeletedCertificatesComplete(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (result DeletedCertificateListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedCertificates") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.GetDeletedCertificates(ctx, vaultBaseURL, maxresults, includePending) + return +} + +// GetDeletedKey the Get Deleted Key operation is applicable for soft-delete enabled vaults. While the operation can be +// invoked on any vault, it will return an error if invoked on a non soft-delete enabled vault. This operation requires +// the keys/get permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// keyName - the name of the key. 
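+//
+// Illustrative sketch (not generated code): assuming client and ctx as above
+// and a soft-delete enabled vault holding a deleted key named "my-key":
+//
+//	deleted, err := client.GetDeletedKey(ctx, "https://myvault.vault.azure.net", "my-key")
+//	if err != nil {
+//		return err // also the path taken on non soft-delete enabled vaults
+//	}
+//	_ = deleted // DeletedKeyBundle with deletion-specific attributes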
+func (client BaseClient) GetDeletedKey(ctx context.Context, vaultBaseURL string, keyName string) (result DeletedKeyBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedKey") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetDeletedKeyPreparer(ctx, vaultBaseURL, keyName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKey", nil, "Failure preparing request") + return + } + + resp, err := client.GetDeletedKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKey", resp, "Failure sending request") + return + } + + result, err = client.GetDeletedKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKey", resp, "Failure responding to request") + return + } + + return +} + +// GetDeletedKeyPreparer prepares the GetDeletedKey request. +func (client BaseClient) GetDeletedKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "key-name": autorest.Encode("path", keyName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/deletedkeys/{key-name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetDeletedKeySender sends the GetDeletedKey request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetDeletedKeySender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetDeletedKeyResponder handles the response to the GetDeletedKey request. The method always +// closes the http.Response Body. +func (client BaseClient) GetDeletedKeyResponder(resp *http.Response) (result DeletedKeyBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetDeletedKeys retrieves a list of the keys in the Key Vault as JSON Web Key structures that contain the public part +// of a deleted key. This operation includes deletion-specific information. The Get Deleted Keys operation is +// applicable for vaults enabled for soft-delete. While the operation can be invoked on any vault, it will return an +// error if invoked on a non soft-delete enabled vault. This operation requires the keys/list permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// maxresults - maximum number of results to return in a page. If not specified the service will return up to +// 25 results. 
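+//
+// Illustrative sketch (not generated code): assuming client, ctx and the
+// autorest/to helpers as above; a maxresults outside 1..25 is rejected by the
+// client-side validation shown below before any request is sent:
+//
+//	page, err := client.GetDeletedKeys(ctx, "https://myvault.vault.azure.net", to.Int32Ptr(10))
+//	for err == nil && page.NotDone() {
+//		_ = page.Values() // []DeletedKeyItem for the current page
+//		err = page.NextWithContext(ctx)
+//	}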
+func (client BaseClient) GetDeletedKeys(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedKeyListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedKeys") + defer func() { + sc := -1 + if result.dklr.Response.Response != nil { + sc = result.dklr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxresults, + Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "GetDeletedKeys", err.Error()) + } + + result.fn = client.getDeletedKeysNextResults + req, err := client.GetDeletedKeysPreparer(ctx, vaultBaseURL, maxresults) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKeys", nil, "Failure preparing request") + return + } + + resp, err := client.GetDeletedKeysSender(req) + if err != nil { + result.dklr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKeys", resp, "Failure sending request") + return + } + + result.dklr, err = client.GetDeletedKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKeys", resp, "Failure responding to request") + return + } + if result.dklr.hasNextLink() && result.dklr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// GetDeletedKeysPreparer prepares the GetDeletedKeys request. +func (client BaseClient) GetDeletedKeysPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if maxresults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxresults) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPath("/deletedkeys"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetDeletedKeysSender sends the GetDeletedKeys request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetDeletedKeysSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetDeletedKeysResponder handles the response to the GetDeletedKeys request. The method always +// closes the http.Response Body. +func (client BaseClient) GetDeletedKeysResponder(resp *http.Response) (result DeletedKeyListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// getDeletedKeysNextResults retrieves the next set of results, if any. 
+func (client BaseClient) getDeletedKeysNextResults(ctx context.Context, lastResults DeletedKeyListResult) (result DeletedKeyListResult, err error) { + req, err := lastResults.deletedKeyListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedKeysNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.GetDeletedKeysSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedKeysNextResults", resp, "Failure sending next results request") + } + result, err = client.GetDeletedKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedKeysNextResults", resp, "Failure responding to next results request") + } + return +} + +// GetDeletedKeysComplete enumerates all values, automatically crossing page boundaries as required. +func (client BaseClient) GetDeletedKeysComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedKeyListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedKeys") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.GetDeletedKeys(ctx, vaultBaseURL, maxresults) + return +} + +// GetDeletedSasDefinition the Get Deleted SAS Definition operation returns the specified deleted SAS definition along +// with its attributes. This operation requires the storage/getsas permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. +// sasDefinitionName - the name of the SAS definition. 
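+//
+// Illustrative sketch (not generated code): assuming client and ctx as above;
+// both names are hypothetical and must match ^[0-9a-zA-Z]+$ to pass the
+// client-side validation below:
+//
+//	sas, err := client.GetDeletedSasDefinition(ctx, "https://myvault.vault.azure.net", "mystorage", "mysas")
+//	if err != nil {
+//		return err
+//	}
+//	_ = sas // DeletedSasDefinitionBundle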
+func (client BaseClient) GetDeletedSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (result DeletedSasDefinitionBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSasDefinition") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: storageAccountName, + Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, + {TargetValue: sasDefinitionName, + Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "GetDeletedSasDefinition", err.Error()) + } + + req, err := client.GetDeletedSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinition", nil, "Failure preparing request") + return + } + + resp, err := client.GetDeletedSasDefinitionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinition", resp, "Failure sending request") + return + } + + result, err = client.GetDeletedSasDefinitionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinition", resp, "Failure responding to request") + return + } + + return +} + +// GetDeletedSasDefinitionPreparer prepares the GetDeletedSasDefinition request. +func (client BaseClient) GetDeletedSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "sas-definition-name": autorest.Encode("path", sasDefinitionName), + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/deletedstorage/{storage-account-name}/sas/{sas-definition-name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetDeletedSasDefinitionSender sends the GetDeletedSasDefinition request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetDeletedSasDefinitionSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetDeletedSasDefinitionResponder handles the response to the GetDeletedSasDefinition request. The method always +// closes the http.Response Body. 
+func (client BaseClient) GetDeletedSasDefinitionResponder(resp *http.Response) (result DeletedSasDefinitionBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetDeletedSasDefinitions the Get Deleted Sas Definitions operation returns the SAS definitions that have been +// deleted for a vault enabled for soft-delete. This operation requires the storage/listsas permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. +// maxresults - maximum number of results to return in a page. If not specified the service will return up to +// 25 results. +func (client BaseClient) GetDeletedSasDefinitions(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (result DeletedSasDefinitionListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSasDefinitions") + defer func() { + sc := -1 + if result.dsdlr.Response.Response != nil { + sc = result.dsdlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: storageAccountName, + Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, + {TargetValue: maxresults, + Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "GetDeletedSasDefinitions", err.Error()) + } + + result.fn = client.getDeletedSasDefinitionsNextResults + req, err := client.GetDeletedSasDefinitionsPreparer(ctx, vaultBaseURL, storageAccountName, maxresults) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinitions", nil, "Failure preparing request") + return + } + + resp, err := client.GetDeletedSasDefinitionsSender(req) + if err != nil { + result.dsdlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinitions", resp, "Failure sending request") + return + } + + result.dsdlr, err = client.GetDeletedSasDefinitionsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinitions", resp, "Failure responding to request") + return + } + if result.dsdlr.hasNextLink() && result.dsdlr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// GetDeletedSasDefinitionsPreparer prepares the GetDeletedSasDefinitions request. 
+func (client BaseClient) GetDeletedSasDefinitionsPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if maxresults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxresults) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/deletedstorage/{storage-account-name}/sas", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetDeletedSasDefinitionsSender sends the GetDeletedSasDefinitions request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetDeletedSasDefinitionsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetDeletedSasDefinitionsResponder handles the response to the GetDeletedSasDefinitions request. The method always +// closes the http.Response Body. +func (client BaseClient) GetDeletedSasDefinitionsResponder(resp *http.Response) (result DeletedSasDefinitionListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// getDeletedSasDefinitionsNextResults retrieves the next set of results, if any. +func (client BaseClient) getDeletedSasDefinitionsNextResults(ctx context.Context, lastResults DeletedSasDefinitionListResult) (result DeletedSasDefinitionListResult, err error) { + req, err := lastResults.deletedSasDefinitionListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSasDefinitionsNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.GetDeletedSasDefinitionsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSasDefinitionsNextResults", resp, "Failure sending next results request") + } + result, err = client.GetDeletedSasDefinitionsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSasDefinitionsNextResults", resp, "Failure responding to next results request") + } + return +} + +// GetDeletedSasDefinitionsComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client BaseClient) GetDeletedSasDefinitionsComplete(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (result DeletedSasDefinitionListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSasDefinitions") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.GetDeletedSasDefinitions(ctx, vaultBaseURL, storageAccountName, maxresults) + return +} + +// GetDeletedSecret the Get Deleted Secret operation returns the specified deleted secret along with its attributes. +// This operation requires the secrets/get permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// secretName - the name of the secret. +func (client BaseClient) GetDeletedSecret(ctx context.Context, vaultBaseURL string, secretName string) (result DeletedSecretBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSecret") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetDeletedSecretPreparer(ctx, vaultBaseURL, secretName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecret", nil, "Failure preparing request") + return + } + + resp, err := client.GetDeletedSecretSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecret", resp, "Failure sending request") + return + } + + result, err = client.GetDeletedSecretResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecret", resp, "Failure responding to request") + return + } + + return +} + +// GetDeletedSecretPreparer prepares the GetDeletedSecret request. +func (client BaseClient) GetDeletedSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "secret-name": autorest.Encode("path", secretName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/deletedsecrets/{secret-name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetDeletedSecretSender sends the GetDeletedSecret request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetDeletedSecretSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetDeletedSecretResponder handles the response to the GetDeletedSecret request. The method always +// closes the http.Response Body. 
+func (client BaseClient) GetDeletedSecretResponder(resp *http.Response) (result DeletedSecretBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetDeletedSecrets the Get Deleted Secrets operation returns the secrets that have been deleted for a vault enabled +// for soft-delete. This operation requires the secrets/list permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// maxresults - maximum number of results to return in a page. If not specified the service will return up to +// 25 results. +func (client BaseClient) GetDeletedSecrets(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedSecretListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSecrets") + defer func() { + sc := -1 + if result.dslr.Response.Response != nil { + sc = result.dslr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxresults, + Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "GetDeletedSecrets", err.Error()) + } + + result.fn = client.getDeletedSecretsNextResults + req, err := client.GetDeletedSecretsPreparer(ctx, vaultBaseURL, maxresults) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecrets", nil, "Failure preparing request") + return + } + + resp, err := client.GetDeletedSecretsSender(req) + if err != nil { + result.dslr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecrets", resp, "Failure sending request") + return + } + + result.dslr, err = client.GetDeletedSecretsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecrets", resp, "Failure responding to request") + return + } + if result.dslr.hasNextLink() && result.dslr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// GetDeletedSecretsPreparer prepares the GetDeletedSecrets request. +func (client BaseClient) GetDeletedSecretsPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if maxresults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxresults) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPath("/deletedsecrets"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetDeletedSecretsSender sends the GetDeletedSecrets request. The method will close the +// http.Response Body if it receives an error. 
+func (client BaseClient) GetDeletedSecretsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetDeletedSecretsResponder handles the response to the GetDeletedSecrets request. The method always +// closes the http.Response Body. +func (client BaseClient) GetDeletedSecretsResponder(resp *http.Response) (result DeletedSecretListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// getDeletedSecretsNextResults retrieves the next set of results, if any. +func (client BaseClient) getDeletedSecretsNextResults(ctx context.Context, lastResults DeletedSecretListResult) (result DeletedSecretListResult, err error) { + req, err := lastResults.deletedSecretListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSecretsNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.GetDeletedSecretsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSecretsNextResults", resp, "Failure sending next results request") + } + result, err = client.GetDeletedSecretsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSecretsNextResults", resp, "Failure responding to next results request") + } + return +} + +// GetDeletedSecretsComplete enumerates all values, automatically crossing page boundaries as required. +func (client BaseClient) GetDeletedSecretsComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedSecretListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSecrets") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.GetDeletedSecrets(ctx, vaultBaseURL, maxresults) + return +} + +// GetDeletedStorageAccount the Get Deleted Storage Account operation returns the specified deleted storage account +// along with its attributes. This operation requires the storage/get permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. 
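+//
+// Illustrative sketch (not generated code): assuming client and ctx as above
+// and a hypothetical account name that matches ^[0-9a-zA-Z]+$:
+//
+//	acct, err := client.GetDeletedStorageAccount(ctx, "https://myvault.vault.azure.net", "mystorage")
+//	if err != nil {
+//		return err
+//	}
+//	_ = acct // DeletedStorageBundle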
+func (client BaseClient) GetDeletedStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result DeletedStorageBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedStorageAccount") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: storageAccountName, + Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "GetDeletedStorageAccount", err.Error()) + } + + req, err := client.GetDeletedStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccount", nil, "Failure preparing request") + return + } + + resp, err := client.GetDeletedStorageAccountSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccount", resp, "Failure sending request") + return + } + + result, err = client.GetDeletedStorageAccountResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccount", resp, "Failure responding to request") + return + } + + return +} + +// GetDeletedStorageAccountPreparer prepares the GetDeletedStorageAccount request. +func (client BaseClient) GetDeletedStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/deletedstorage/{storage-account-name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetDeletedStorageAccountSender sends the GetDeletedStorageAccount request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetDeletedStorageAccountSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetDeletedStorageAccountResponder handles the response to the GetDeletedStorageAccount request. The method always +// closes the http.Response Body. +func (client BaseClient) GetDeletedStorageAccountResponder(resp *http.Response) (result DeletedStorageBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetDeletedStorageAccounts the Get Deleted Storage Accounts operation returns the storage accounts that have been +// deleted for a vault enabled for soft-delete. This operation requires the storage/list permission. 
+// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// maxresults - maximum number of results to return in a page. If not specified the service will return up to +// 25 results. +func (client BaseClient) GetDeletedStorageAccounts(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedStorageListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedStorageAccounts") + defer func() { + sc := -1 + if result.dslr.Response.Response != nil { + sc = result.dslr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxresults, + Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "GetDeletedStorageAccounts", err.Error()) + } + + result.fn = client.getDeletedStorageAccountsNextResults + req, err := client.GetDeletedStorageAccountsPreparer(ctx, vaultBaseURL, maxresults) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccounts", nil, "Failure preparing request") + return + } + + resp, err := client.GetDeletedStorageAccountsSender(req) + if err != nil { + result.dslr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccounts", resp, "Failure sending request") + return + } + + result.dslr, err = client.GetDeletedStorageAccountsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccounts", resp, "Failure responding to request") + return + } + if result.dslr.hasNextLink() && result.dslr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// GetDeletedStorageAccountsPreparer prepares the GetDeletedStorageAccounts request. +func (client BaseClient) GetDeletedStorageAccountsPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if maxresults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxresults) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPath("/deletedstorage"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetDeletedStorageAccountsSender sends the GetDeletedStorageAccounts request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetDeletedStorageAccountsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetDeletedStorageAccountsResponder handles the response to the GetDeletedStorageAccounts request. The method always +// closes the http.Response Body. 
+func (client BaseClient) GetDeletedStorageAccountsResponder(resp *http.Response) (result DeletedStorageListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// getDeletedStorageAccountsNextResults retrieves the next set of results, if any. +func (client BaseClient) getDeletedStorageAccountsNextResults(ctx context.Context, lastResults DeletedStorageListResult) (result DeletedStorageListResult, err error) { + req, err := lastResults.deletedStorageListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedStorageAccountsNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.GetDeletedStorageAccountsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedStorageAccountsNextResults", resp, "Failure sending next results request") + } + result, err = client.GetDeletedStorageAccountsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedStorageAccountsNextResults", resp, "Failure responding to next results request") + } + return +} + +// GetDeletedStorageAccountsComplete enumerates all values, automatically crossing page boundaries as required. +func (client BaseClient) GetDeletedStorageAccountsComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedStorageListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedStorageAccounts") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.GetDeletedStorageAccounts(ctx, vaultBaseURL, maxresults) + return +} + +// GetKey the get key operation is applicable to all key types. If the requested key is symmetric, then no key material +// is released in the response. This operation requires the keys/get permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// keyName - the name of the key to get. +// keyVersion - adding the version parameter retrieves a specific version of a key. This URI fragment is +// optional. If not specified, the latest version of the key is returned. 
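+//
+// Illustrative sketch (not generated code): assuming client and ctx as above;
+// passing an empty keyVersion returns the latest version of a hypothetical
+// key "my-key":
+//
+//	key, err := client.GetKey(ctx, "https://myvault.vault.azure.net", "my-key", "")
+//	if err != nil {
+//		return err
+//	}
+//	_ = key // KeyBundle; symmetric keys are returned without key material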
+func (client BaseClient) GetKey(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string) (result KeyBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKey") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetKeyPreparer(ctx, vaultBaseURL, keyName, keyVersion) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKey", nil, "Failure preparing request") + return + } + + resp, err := client.GetKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKey", resp, "Failure sending request") + return + } + + result, err = client.GetKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKey", resp, "Failure responding to request") + return + } + + return +} + +// GetKeyPreparer prepares the GetKey request. +func (client BaseClient) GetKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "key-name": autorest.Encode("path", keyName), + "key-version": autorest.Encode("path", keyVersion), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/keys/{key-name}/{key-version}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetKeySender sends the GetKey request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetKeySender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetKeyResponder handles the response to the GetKey request. The method always +// closes the http.Response Body. +func (client BaseClient) GetKeyResponder(resp *http.Response) (result KeyBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetKeys retrieves a list of the keys in the Key Vault as JSON Web Key structures that contain the public part of a +// stored key. The LIST operation is applicable to all key types, however only the base key identifier, attributes, and +// tags are provided in the response. Individual versions of a key are not listed in the response. This operation +// requires the keys/list permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// maxresults - maximum number of results to return in a page. If not specified the service will return up to +// 25 results. 
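+//
+// Illustrative sketch (editorial addition): enumerating every key with the
+// Complete iterator, which crosses page boundaries automatically. client, ctx
+// and vaultURL are assumed to be set up as in the GetKey sketch above.
+//
+//	iter, err := client.GetKeysComplete(ctx, vaultURL, nil) // nil maxresults: server default page size
+//	for err == nil && iter.NotDone() {
+//		if kid := iter.Value().Kid; kid != nil {
+//			fmt.Println(*kid) // base key identifier; versions are not listed here
+//		}
+//		err = iter.NextWithContext(ctx)
+//	}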
+func (client BaseClient) GetKeys(ctx context.Context, vaultBaseURL string, maxresults *int32) (result KeyListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKeys") + defer func() { + sc := -1 + if result.klr.Response.Response != nil { + sc = result.klr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxresults, + Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "GetKeys", err.Error()) + } + + result.fn = client.getKeysNextResults + req, err := client.GetKeysPreparer(ctx, vaultBaseURL, maxresults) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeys", nil, "Failure preparing request") + return + } + + resp, err := client.GetKeysSender(req) + if err != nil { + result.klr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeys", resp, "Failure sending request") + return + } + + result.klr, err = client.GetKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeys", resp, "Failure responding to request") + return + } + if result.klr.hasNextLink() && result.klr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// GetKeysPreparer prepares the GetKeys request. +func (client BaseClient) GetKeysPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if maxresults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxresults) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPath("/keys"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetKeysSender sends the GetKeys request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetKeysSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetKeysResponder handles the response to the GetKeys request. The method always +// closes the http.Response Body. +func (client BaseClient) GetKeysResponder(resp *http.Response) (result KeyListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// getKeysNextResults retrieves the next set of results, if any. 
+func (client BaseClient) getKeysNextResults(ctx context.Context, lastResults KeyListResult) (result KeyListResult, err error) { + req, err := lastResults.keyListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeysNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.GetKeysSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeysNextResults", resp, "Failure sending next results request") + } + result, err = client.GetKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeysNextResults", resp, "Failure responding to next results request") + } + return +} + +// GetKeysComplete enumerates all values, automatically crossing page boundaries as required. +func (client BaseClient) GetKeysComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result KeyListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKeys") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.GetKeys(ctx, vaultBaseURL, maxresults) + return +} + +// GetKeyVersions the full key identifier, attributes, and tags are provided in the response. This operation requires +// the keys/list permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// keyName - the name of the key. +// maxresults - maximum number of results to return in a page. If not specified the service will return up to +// 25 results. 
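+//
+// Illustrative sketch (editorial addition): walking the result page by page
+// rather than through the iterator; client, ctx and vaultURL as in the GetKey
+// sketch above.
+//
+//	page, err := client.GetKeyVersions(ctx, vaultURL, "my-key", nil)
+//	for err == nil && page.NotDone() {
+//		for _, item := range page.Values() {
+//			if item.Kid != nil {
+//				fmt.Println(*item.Kid) // versioned key identifier
+//			}
+//		}
+//		err = page.NextWithContext(ctx)
+//	}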
+func (client BaseClient) GetKeyVersions(ctx context.Context, vaultBaseURL string, keyName string, maxresults *int32) (result KeyListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKeyVersions") + defer func() { + sc := -1 + if result.klr.Response.Response != nil { + sc = result.klr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxresults, + Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "GetKeyVersions", err.Error()) + } + + result.fn = client.getKeyVersionsNextResults + req, err := client.GetKeyVersionsPreparer(ctx, vaultBaseURL, keyName, maxresults) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeyVersions", nil, "Failure preparing request") + return + } + + resp, err := client.GetKeyVersionsSender(req) + if err != nil { + result.klr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeyVersions", resp, "Failure sending request") + return + } + + result.klr, err = client.GetKeyVersionsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeyVersions", resp, "Failure responding to request") + return + } + if result.klr.hasNextLink() && result.klr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// GetKeyVersionsPreparer prepares the GetKeyVersions request. +func (client BaseClient) GetKeyVersionsPreparer(ctx context.Context, vaultBaseURL string, keyName string, maxresults *int32) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "key-name": autorest.Encode("path", keyName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if maxresults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxresults) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/keys/{key-name}/versions", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetKeyVersionsSender sends the GetKeyVersions request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetKeyVersionsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetKeyVersionsResponder handles the response to the GetKeyVersions request. The method always +// closes the http.Response Body. 
+func (client BaseClient) GetKeyVersionsResponder(resp *http.Response) (result KeyListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// getKeyVersionsNextResults retrieves the next set of results, if any. +func (client BaseClient) getKeyVersionsNextResults(ctx context.Context, lastResults KeyListResult) (result KeyListResult, err error) { + req, err := lastResults.keyListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeyVersionsNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.GetKeyVersionsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeyVersionsNextResults", resp, "Failure sending next results request") + } + result, err = client.GetKeyVersionsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeyVersionsNextResults", resp, "Failure responding to next results request") + } + return +} + +// GetKeyVersionsComplete enumerates all values, automatically crossing page boundaries as required. +func (client BaseClient) GetKeyVersionsComplete(ctx context.Context, vaultBaseURL string, keyName string, maxresults *int32) (result KeyListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKeyVersions") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.GetKeyVersions(ctx, vaultBaseURL, keyName, maxresults) + return +} + +// GetSasDefinition gets information about a SAS definition for the specified storage account. This operation requires +// the storage/getsas permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. +// sasDefinitionName - the name of the SAS definition. 
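+//
+// Illustrative sketch (editorial addition): both names must match
+// ^[0-9a-zA-Z]+$, otherwise the call fails client-side validation before any
+// request is sent. client, ctx and vaultURL as in the GetKey sketch above.
+//
+//	def, err := client.GetSasDefinition(ctx, vaultURL, "mystorage", "mysasdef")
+//	if err == nil && def.ID != nil {
+//		fmt.Println(*def.ID)
+//	}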
+func (client BaseClient) GetSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (result SasDefinitionBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSasDefinition") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: storageAccountName, + Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, + {TargetValue: sasDefinitionName, + Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "GetSasDefinition", err.Error()) + } + + req, err := client.GetSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinition", nil, "Failure preparing request") + return + } + + resp, err := client.GetSasDefinitionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinition", resp, "Failure sending request") + return + } + + result, err = client.GetSasDefinitionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinition", resp, "Failure responding to request") + return + } + + return +} + +// GetSasDefinitionPreparer prepares the GetSasDefinition request. +func (client BaseClient) GetSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "sas-definition-name": autorest.Encode("path", sasDefinitionName), + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/storage/{storage-account-name}/sas/{sas-definition-name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSasDefinitionSender sends the GetSasDefinition request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetSasDefinitionSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetSasDefinitionResponder handles the response to the GetSasDefinition request. The method always +// closes the http.Response Body. 
+func (client BaseClient) GetSasDefinitionResponder(resp *http.Response) (result SasDefinitionBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetSasDefinitions list storage SAS definitions for the given storage account. This operation requires the +// storage/listsas permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. +// maxresults - maximum number of results to return in a page. If not specified the service will return up to +// 25 results. +func (client BaseClient) GetSasDefinitions(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (result SasDefinitionListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSasDefinitions") + defer func() { + sc := -1 + if result.sdlr.Response.Response != nil { + sc = result.sdlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: storageAccountName, + Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, + {TargetValue: maxresults, + Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "GetSasDefinitions", err.Error()) + } + + result.fn = client.getSasDefinitionsNextResults + req, err := client.GetSasDefinitionsPreparer(ctx, vaultBaseURL, storageAccountName, maxresults) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinitions", nil, "Failure preparing request") + return + } + + resp, err := client.GetSasDefinitionsSender(req) + if err != nil { + result.sdlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinitions", resp, "Failure sending request") + return + } + + result.sdlr, err = client.GetSasDefinitionsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinitions", resp, "Failure responding to request") + return + } + if result.sdlr.hasNextLink() && result.sdlr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// GetSasDefinitionsPreparer prepares the GetSasDefinitions request. 
+func (client BaseClient) GetSasDefinitionsPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if maxresults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxresults) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/storage/{storage-account-name}/sas", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSasDefinitionsSender sends the GetSasDefinitions request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetSasDefinitionsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetSasDefinitionsResponder handles the response to the GetSasDefinitions request. The method always +// closes the http.Response Body. +func (client BaseClient) GetSasDefinitionsResponder(resp *http.Response) (result SasDefinitionListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// getSasDefinitionsNextResults retrieves the next set of results, if any. +func (client BaseClient) getSasDefinitionsNextResults(ctx context.Context, lastResults SasDefinitionListResult) (result SasDefinitionListResult, err error) { + req, err := lastResults.sasDefinitionListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSasDefinitionsNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.GetSasDefinitionsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSasDefinitionsNextResults", resp, "Failure sending next results request") + } + result, err = client.GetSasDefinitionsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSasDefinitionsNextResults", resp, "Failure responding to next results request") + } + return +} + +// GetSasDefinitionsComplete enumerates all values, automatically crossing page boundaries as required. +func (client BaseClient) GetSasDefinitionsComplete(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (result SasDefinitionListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSasDefinitions") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.GetSasDefinitions(ctx, vaultBaseURL, storageAccountName, maxresults) + return +} + +// GetSecret the GET operation is applicable to any secret stored in Azure Key Vault. 
This operation requires the +// secrets/get permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// secretName - the name of the secret. +// secretVersion - the version of the secret. This URI fragment is optional. If not specified, the latest +// version of the secret is returned. +func (client BaseClient) GetSecret(ctx context.Context, vaultBaseURL string, secretName string, secretVersion string) (result SecretBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecret") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetSecretPreparer(ctx, vaultBaseURL, secretName, secretVersion) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecret", nil, "Failure preparing request") + return + } + + resp, err := client.GetSecretSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecret", resp, "Failure sending request") + return + } + + result, err = client.GetSecretResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecret", resp, "Failure responding to request") + return + } + + return +} + +// GetSecretPreparer prepares the GetSecret request. +func (client BaseClient) GetSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string, secretVersion string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "secret-name": autorest.Encode("path", secretName), + "secret-version": autorest.Encode("path", secretVersion), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/secrets/{secret-name}/{secret-version}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSecretSender sends the GetSecret request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetSecretSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetSecretResponder handles the response to the GetSecret request. The method always +// closes the http.Response Body. +func (client BaseClient) GetSecretResponder(resp *http.Response) (result SecretBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetSecrets the Get Secrets operation is applicable to the entire vault. However, only the base secret identifier and +// its attributes are provided in the response. Individual secret versions are not listed in the response. This +// operation requires the secrets/list permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// maxresults - maximum number of results to return in a page. 
If not specified, the service will return up to +// 25 results. +func (client BaseClient) GetSecrets(ctx context.Context, vaultBaseURL string, maxresults *int32) (result SecretListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecrets") + defer func() { + sc := -1 + if result.slr.Response.Response != nil { + sc = result.slr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxresults, + Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "GetSecrets", err.Error()) + } + + result.fn = client.getSecretsNextResults + req, err := client.GetSecretsPreparer(ctx, vaultBaseURL, maxresults) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecrets", nil, "Failure preparing request") + return + } + + resp, err := client.GetSecretsSender(req) + if err != nil { + result.slr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecrets", resp, "Failure sending request") + return + } + + result.slr, err = client.GetSecretsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecrets", resp, "Failure responding to request") + return + } + if result.slr.hasNextLink() && result.slr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// GetSecretsPreparer prepares the GetSecrets request. +func (client BaseClient) GetSecretsPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if maxresults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxresults) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPath("/secrets"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSecretsSender sends the GetSecrets request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetSecretsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetSecretsResponder handles the response to the GetSecrets request. The method always +// closes the http.Response Body. +func (client BaseClient) GetSecretsResponder(resp *http.Response) (result SecretListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// getSecretsNextResults retrieves the next set of results, if any. 
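+// Callers normally never invoke it directly; NextWithContext on the page (or
+// the Complete iterator) drives it. Illustrative sketch (editorial addition),
+// with client, ctx and vaultURL as in the GetKey sketch above:
+//
+//	page, err := client.GetSecrets(ctx, vaultURL, nil)
+//	for err == nil && page.NotDone() {
+//		fmt.Println(len(page.Values()), "secrets on this page")
+//		err = page.NextWithContext(ctx) // fetches the next page via this helper
+//	}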
+func (client BaseClient) getSecretsNextResults(ctx context.Context, lastResults SecretListResult) (result SecretListResult, err error) {
+	req, err := lastResults.secretListResultPreparer(ctx)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretsNextResults", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+	resp, err := client.GetSecretsSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretsNextResults", resp, "Failure sending next results request")
+	}
+	result, err = client.GetSecretsResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretsNextResults", resp, "Failure responding to next results request")
+	}
+	return
+}
+
+// GetSecretsComplete enumerates all values, automatically crossing page boundaries as required.
+func (client BaseClient) GetSecretsComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result SecretListResultIterator, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecrets")
+		defer func() {
+			sc := -1
+			if result.Response().Response.Response != nil {
+				sc = result.page.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.page, err = client.GetSecrets(ctx, vaultBaseURL, maxresults)
+	return
+}
+
+// GetSecretVersions the full secret identifier and attributes are provided in the response. No values are returned for
+// the secrets. This operation requires the secrets/list permission.
+// Parameters:
+// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
+// secretName - the name of the secret.
+// maxresults - maximum number of results to return in a page. If not specified, the service will return up to
+// 25 results.
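+//
+// Illustrative sketch (editorial addition): enumerating the versions of one
+// secret; client, ctx and vaultURL as in the GetKey sketch above.
+//
+//	iter, err := client.GetSecretVersionsComplete(ctx, vaultURL, "my-secret", nil)
+//	for err == nil && iter.NotDone() {
+//		if id := iter.Value().ID; id != nil {
+//			fmt.Println(*id) // versioned identifier only; secret values are never returned
+//		}
+//		err = iter.NextWithContext(ctx)
+//	}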
+func (client BaseClient) GetSecretVersions(ctx context.Context, vaultBaseURL string, secretName string, maxresults *int32) (result SecretListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecretVersions") + defer func() { + sc := -1 + if result.slr.Response.Response != nil { + sc = result.slr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxresults, + Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "GetSecretVersions", err.Error()) + } + + result.fn = client.getSecretVersionsNextResults + req, err := client.GetSecretVersionsPreparer(ctx, vaultBaseURL, secretName, maxresults) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecretVersions", nil, "Failure preparing request") + return + } + + resp, err := client.GetSecretVersionsSender(req) + if err != nil { + result.slr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecretVersions", resp, "Failure sending request") + return + } + + result.slr, err = client.GetSecretVersionsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecretVersions", resp, "Failure responding to request") + return + } + if result.slr.hasNextLink() && result.slr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// GetSecretVersionsPreparer prepares the GetSecretVersions request. +func (client BaseClient) GetSecretVersionsPreparer(ctx context.Context, vaultBaseURL string, secretName string, maxresults *int32) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "secret-name": autorest.Encode("path", secretName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if maxresults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxresults) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/secrets/{secret-name}/versions", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSecretVersionsSender sends the GetSecretVersions request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetSecretVersionsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetSecretVersionsResponder handles the response to the GetSecretVersions request. The method always +// closes the http.Response Body. 
+func (client BaseClient) GetSecretVersionsResponder(resp *http.Response) (result SecretListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// getSecretVersionsNextResults retrieves the next set of results, if any. +func (client BaseClient) getSecretVersionsNextResults(ctx context.Context, lastResults SecretListResult) (result SecretListResult, err error) { + req, err := lastResults.secretListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretVersionsNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.GetSecretVersionsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretVersionsNextResults", resp, "Failure sending next results request") + } + result, err = client.GetSecretVersionsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretVersionsNextResults", resp, "Failure responding to next results request") + } + return +} + +// GetSecretVersionsComplete enumerates all values, automatically crossing page boundaries as required. +func (client BaseClient) GetSecretVersionsComplete(ctx context.Context, vaultBaseURL string, secretName string, maxresults *int32) (result SecretListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecretVersions") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.GetSecretVersions(ctx, vaultBaseURL, secretName, maxresults) + return +} + +// GetStorageAccount gets information about a specified storage account. This operation requires the storage/get +// permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. 
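+//
+// Illustrative sketch (editorial addition); the account name must match
+// ^[0-9a-zA-Z]+$. client, ctx and vaultURL as in the GetKey sketch above.
+//
+//	sa, err := client.GetStorageAccount(ctx, vaultURL, "mystorage")
+//	if err == nil && sa.ResourceID != nil {
+//		fmt.Println(*sa.ResourceID) // ARM resource ID of the managed account
+//	}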
+func (client BaseClient) GetStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result StorageBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetStorageAccount") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: storageAccountName, + Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "GetStorageAccount", err.Error()) + } + + req, err := client.GetStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccount", nil, "Failure preparing request") + return + } + + resp, err := client.GetStorageAccountSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccount", resp, "Failure sending request") + return + } + + result, err = client.GetStorageAccountResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccount", resp, "Failure responding to request") + return + } + + return +} + +// GetStorageAccountPreparer prepares the GetStorageAccount request. +func (client BaseClient) GetStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/storage/{storage-account-name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetStorageAccountSender sends the GetStorageAccount request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetStorageAccountSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetStorageAccountResponder handles the response to the GetStorageAccount request. The method always +// closes the http.Response Body. +func (client BaseClient) GetStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetStorageAccounts list storage accounts managed by the specified key vault. This operation requires the +// storage/list permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// maxresults - maximum number of results to return in a page. If not specified the service will return up to +// 25 results. 
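+//
+// Illustrative sketch (editorial addition): paging through the managed
+// storage accounts; client, ctx and vaultURL as in the GetKey sketch above.
+//
+//	iter, err := client.GetStorageAccountsComplete(ctx, vaultURL, nil)
+//	for err == nil && iter.NotDone() {
+//		if id := iter.Value().ID; id != nil {
+//			fmt.Println(*id)
+//		}
+//		err = iter.NextWithContext(ctx)
+//	}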
+func (client BaseClient) GetStorageAccounts(ctx context.Context, vaultBaseURL string, maxresults *int32) (result StorageListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetStorageAccounts") + defer func() { + sc := -1 + if result.slr.Response.Response != nil { + sc = result.slr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxresults, + Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "GetStorageAccounts", err.Error()) + } + + result.fn = client.getStorageAccountsNextResults + req, err := client.GetStorageAccountsPreparer(ctx, vaultBaseURL, maxresults) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccounts", nil, "Failure preparing request") + return + } + + resp, err := client.GetStorageAccountsSender(req) + if err != nil { + result.slr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccounts", resp, "Failure sending request") + return + } + + result.slr, err = client.GetStorageAccountsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccounts", resp, "Failure responding to request") + return + } + if result.slr.hasNextLink() && result.slr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// GetStorageAccountsPreparer prepares the GetStorageAccounts request. +func (client BaseClient) GetStorageAccountsPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if maxresults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxresults) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPath("/storage"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetStorageAccountsSender sends the GetStorageAccounts request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetStorageAccountsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetStorageAccountsResponder handles the response to the GetStorageAccounts request. The method always +// closes the http.Response Body. +func (client BaseClient) GetStorageAccountsResponder(resp *http.Response) (result StorageListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// getStorageAccountsNextResults retrieves the next set of results, if any. 
+func (client BaseClient) getStorageAccountsNextResults(ctx context.Context, lastResults StorageListResult) (result StorageListResult, err error) { + req, err := lastResults.storageListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getStorageAccountsNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.GetStorageAccountsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getStorageAccountsNextResults", resp, "Failure sending next results request") + } + result, err = client.GetStorageAccountsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getStorageAccountsNextResults", resp, "Failure responding to next results request") + } + return +} + +// GetStorageAccountsComplete enumerates all values, automatically crossing page boundaries as required. +func (client BaseClient) GetStorageAccountsComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result StorageListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetStorageAccounts") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.GetStorageAccounts(ctx, vaultBaseURL, maxresults) + return +} + +// ImportCertificate imports an existing valid certificate, containing a private key, into Azure Key Vault. The +// certificate to be imported can be in either PFX or PEM format. If the certificate is in PEM format the PEM file must +// contain the key as well as x509 certificates. This operation requires the certificates/import permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// certificateName - the name of the certificate. +// parameters - the parameters to import the certificate. 
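+//
+// Illustrative sketch (editorial addition): importing a PFX blob; client, ctx
+// and vaultURL as in the GetKey sketch above, pfxBytes and the password are
+// placeholders. Base64EncodedCertificate is the one required field; to is the
+// vendored github.com/Azure/go-autorest/autorest/to helper.
+//
+//	params := keyvault.CertificateImportParameters{
+//		Base64EncodedCertificate: to.StringPtr(base64.StdEncoding.EncodeToString(pfxBytes)),
+//		Password:                 to.StringPtr("pfx-password"),
+//	}
+//	bundle, err := client.ImportCertificate(ctx, vaultURL, "my-cert", params)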
+func (client BaseClient) ImportCertificate(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateImportParameters) (result CertificateBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.ImportCertificate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: certificateName, + Constraints: []validation.Constraint{{Target: "certificateName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Base64EncodedCertificate", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.CertificatePolicy", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil}}}, + }}, + }}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "ImportCertificate", err.Error()) + } + + req, err := client.ImportCertificatePreparer(ctx, vaultBaseURL, certificateName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportCertificate", nil, "Failure preparing request") + return + } + + resp, err := client.ImportCertificateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportCertificate", resp, "Failure sending request") + return + } + + result, err = client.ImportCertificateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportCertificate", resp, "Failure responding to request") + return + } + + return +} + +// ImportCertificatePreparer prepares the ImportCertificate request. +func (client BaseClient) ImportCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateImportParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "certificate-name": autorest.Encode("path", certificateName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/certificates/{certificate-name}/import", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ImportCertificateSender sends the ImportCertificate request. The method will close the +// http.Response Body if it receives an error. 
+func (client BaseClient) ImportCertificateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ImportCertificateResponder handles the response to the ImportCertificate request. The method always +// closes the http.Response Body. +func (client BaseClient) ImportCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ImportKey the import key operation may be used to import any key type into an Azure Key Vault. If the named key +// already exists, Azure Key Vault creates a new version of the key. This operation requires the keys/import +// permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// keyName - name for the imported key. +// parameters - the parameters to import a key. +func (client BaseClient) ImportKey(ctx context.Context, vaultBaseURL string, keyName string, parameters KeyImportParameters) (result KeyBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.ImportKey") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: keyName, + Constraints: []validation.Constraint{{Target: "keyName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Key", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "ImportKey", err.Error()) + } + + req, err := client.ImportKeyPreparer(ctx, vaultBaseURL, keyName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportKey", nil, "Failure preparing request") + return + } + + resp, err := client.ImportKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportKey", resp, "Failure sending request") + return + } + + result, err = client.ImportKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportKey", resp, "Failure responding to request") + return + } + + return +} + +// ImportKeyPreparer prepares the ImportKey request. 
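+// The parameters become the JSON request body of a PUT to /keys/{key-name}.
+// Illustrative sketch (editorial addition) of a minimal RSA import; client,
+// ctx and vaultURL as in the GetKey sketch above, and n and e are assumed to
+// hold the base64url-encoded modulus and exponent:
+//
+//	key := keyvault.JSONWebKey{Kty: keyvault.RSA, N: &n, E: &e}
+//	params := keyvault.KeyImportParameters{Key: &key}
+//	bundle, err := client.ImportKey(ctx, vaultURL, "imported-key", params)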
+func (client BaseClient) ImportKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, parameters KeyImportParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "key-name": autorest.Encode("path", keyName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/keys/{key-name}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ImportKeySender sends the ImportKey request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) ImportKeySender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ImportKeyResponder handles the response to the ImportKey request. The method always +// closes the http.Response Body. +func (client BaseClient) ImportKeyResponder(resp *http.Response) (result KeyBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// MergeCertificate the MergeCertificate operation performs the merging of a certificate or certificate chain with a +// key pair currently available in the service. This operation requires the certificates/create permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// certificateName - the name of the certificate. +// parameters - the parameters to merge certificate. 
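+//
+// Illustrative sketch (editorial addition): merging a CA-signed certificate
+// back into the pending object created by CreateCertificate; client, ctx and
+// vaultURL as in the GetKey sketch above, and signedDER is a placeholder for
+// the DER-encoded certificate returned by the CA.
+//
+//	params := keyvault.CertificateMergeParameters{
+//		X509Certificates: &[][]byte{signedDER},
+//	}
+//	bundle, err := client.MergeCertificate(ctx, vaultURL, "my-cert", params)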
+func (client BaseClient) MergeCertificate(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateMergeParameters) (result CertificateBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.MergeCertificate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.X509Certificates", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "MergeCertificate", err.Error()) + } + + req, err := client.MergeCertificatePreparer(ctx, vaultBaseURL, certificateName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "MergeCertificate", nil, "Failure preparing request") + return + } + + resp, err := client.MergeCertificateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "MergeCertificate", resp, "Failure sending request") + return + } + + result, err = client.MergeCertificateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "MergeCertificate", resp, "Failure responding to request") + return + } + + return +} + +// MergeCertificatePreparer prepares the MergeCertificate request. +func (client BaseClient) MergeCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateMergeParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "certificate-name": autorest.Encode("path", certificateName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/certificates/{certificate-name}/pending/merge", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// MergeCertificateSender sends the MergeCertificate request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) MergeCertificateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// MergeCertificateResponder handles the response to the MergeCertificate request. The method always +// closes the http.Response Body. +func (client BaseClient) MergeCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// PurgeDeletedCertificate the PurgeDeletedCertificate operation performs an irreversible deletion of the specified +// certificate, without possibility for recovery. 
The operation is not available if the recovery level does not specify
+// 'Purgeable'. This operation requires the certificates/purge permission.
+// Parameters:
+// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
+// certificateName - the name of the certificate
+func (client BaseClient) PurgeDeletedCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result autorest.Response, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.PurgeDeletedCertificate")
+		defer func() {
+			sc := -1
+			if result.Response != nil {
+				sc = result.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.PurgeDeletedCertificatePreparer(ctx, vaultBaseURL, certificateName)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedCertificate", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.PurgeDeletedCertificateSender(req)
+	if err != nil {
+		result.Response = resp
+		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedCertificate", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.PurgeDeletedCertificateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedCertificate", resp, "Failure responding to request")
+		return
+	}
+
+	return
+}
+
+// PurgeDeletedCertificatePreparer prepares the PurgeDeletedCertificate request.
+func (client BaseClient) PurgeDeletedCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) {
+	urlParameters := map[string]interface{}{
+		"vaultBaseUrl": vaultBaseURL,
+	}
+
+	pathParameters := map[string]interface{}{
+		"certificate-name": autorest.Encode("path", certificateName),
+	}
+
+	const APIVersion = "7.1"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
+		autorest.WithPathParameters("/deletedcertificates/{certificate-name}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// PurgeDeletedCertificateSender sends the PurgeDeletedCertificate request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) PurgeDeletedCertificateSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+}
+
+// PurgeDeletedCertificateResponder handles the response to the PurgeDeletedCertificate request. The method always
+// closes the http.Response Body.
+func (client BaseClient) PurgeDeletedCertificateResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// PurgeDeletedKey the Purge Deleted Key operation is applicable for soft-delete enabled vaults. While the operation
+// can be invoked on any vault, it will return an error if invoked on a non soft-delete enabled vault. This operation
+// requires the keys/purge permission.
+// Parameters:
+// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
+// keyName - the name of the key
+func (client BaseClient) PurgeDeletedKey(ctx context.Context, vaultBaseURL string, keyName string) (result autorest.Response, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.PurgeDeletedKey")
+		defer func() {
+			sc := -1
+			if result.Response != nil {
+				sc = result.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.PurgeDeletedKeyPreparer(ctx, vaultBaseURL, keyName)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedKey", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.PurgeDeletedKeySender(req)
+	if err != nil {
+		result.Response = resp
+		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedKey", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.PurgeDeletedKeyResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedKey", resp, "Failure responding to request")
+		return
+	}
+
+	return
+}
+
+// PurgeDeletedKeyPreparer prepares the PurgeDeletedKey request.
+func (client BaseClient) PurgeDeletedKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) {
+	urlParameters := map[string]interface{}{
+		"vaultBaseUrl": vaultBaseURL,
+	}
+
+	pathParameters := map[string]interface{}{
+		"key-name": autorest.Encode("path", keyName),
+	}
+
+	const APIVersion = "7.1"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
+		autorest.WithPathParameters("/deletedkeys/{key-name}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// PurgeDeletedKeySender sends the PurgeDeletedKey request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) PurgeDeletedKeySender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+}
+
+// PurgeDeletedKeyResponder handles the response to the PurgeDeletedKey request. The method always
+// closes the http.Response Body.
+func (client BaseClient) PurgeDeletedKeyResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// PurgeDeletedSecret the purge deleted secret operation removes the secret permanently, without the possibility of
+// recovery. This operation can only be performed on a soft-delete enabled vault. This operation requires the
+// secrets/purge permission.
+// Parameters:
+// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
+// secretName - the name of the secret.
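+
+// Usage sketch (illustrative, not generated code): purging a soft-deleted
+// key. The purge family returns a bare autorest.Response (there is no body to
+// unmarshal), and the Responder above treats 200 and 204 as success. The
+// vault URL and key name are placeholders; the vault must be soft-delete
+// enabled or the service returns an error.
+//
+//	resp, err := client.PurgeDeletedKey(ctx, "https://myvault.vault.azure.net", "my-key")
+//	if err == nil {
+//		fmt.Println(resp.StatusCode) // typically 204 No Content
+//	}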
+func (client BaseClient) PurgeDeletedSecret(ctx context.Context, vaultBaseURL string, secretName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.PurgeDeletedSecret") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.PurgeDeletedSecretPreparer(ctx, vaultBaseURL, secretName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedSecret", nil, "Failure preparing request") + return + } + + resp, err := client.PurgeDeletedSecretSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedSecret", resp, "Failure sending request") + return + } + + result, err = client.PurgeDeletedSecretResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedSecret", resp, "Failure responding to request") + return + } + + return +} + +// PurgeDeletedSecretPreparer prepares the PurgeDeletedSecret request. +func (client BaseClient) PurgeDeletedSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "secret-name": autorest.Encode("path", secretName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/deletedsecrets/{secret-name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// PurgeDeletedSecretSender sends the PurgeDeletedSecret request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) PurgeDeletedSecretSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// PurgeDeletedSecretResponder handles the response to the PurgeDeletedSecret request. The method always +// closes the http.Response Body. +func (client BaseClient) PurgeDeletedSecretResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// PurgeDeletedStorageAccount the purge deleted storage account operation removes the secret permanently, without the +// possibility of recovery. This operation can only be performed on a soft-delete enabled vault. This operation +// requires the storage/purge permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. 
+func (client BaseClient) PurgeDeletedStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.PurgeDeletedStorageAccount") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: storageAccountName, + Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "PurgeDeletedStorageAccount", err.Error()) + } + + req, err := client.PurgeDeletedStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedStorageAccount", nil, "Failure preparing request") + return + } + + resp, err := client.PurgeDeletedStorageAccountSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedStorageAccount", resp, "Failure sending request") + return + } + + result, err = client.PurgeDeletedStorageAccountResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedStorageAccount", resp, "Failure responding to request") + return + } + + return +} + +// PurgeDeletedStorageAccountPreparer prepares the PurgeDeletedStorageAccount request. +func (client BaseClient) PurgeDeletedStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/deletedstorage/{storage-account-name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// PurgeDeletedStorageAccountSender sends the PurgeDeletedStorageAccount request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) PurgeDeletedStorageAccountSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// PurgeDeletedStorageAccountResponder handles the response to the PurgeDeletedStorageAccount request. The method always +// closes the http.Response Body. +func (client BaseClient) PurgeDeletedStorageAccountResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// RecoverDeletedCertificate the RecoverDeletedCertificate operation performs the reversal of the Delete operation. The +// operation is applicable in vaults enabled for soft-delete, and must be issued during the retention interval +// (available in the deleted certificate's attributes). 
This operation requires the certificates/recover permission.
+// Parameters:
+// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
+// certificateName - the name of the deleted certificate
+func (client BaseClient) RecoverDeletedCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result CertificateBundle, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedCertificate")
+		defer func() {
+			sc := -1
+			if result.Response.Response != nil {
+				sc = result.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.RecoverDeletedCertificatePreparer(ctx, vaultBaseURL, certificateName)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedCertificate", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.RecoverDeletedCertificateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedCertificate", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.RecoverDeletedCertificateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedCertificate", resp, "Failure responding to request")
+		return
+	}
+
+	return
+}
+
+// RecoverDeletedCertificatePreparer prepares the RecoverDeletedCertificate request.
+func (client BaseClient) RecoverDeletedCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) {
+	urlParameters := map[string]interface{}{
+		"vaultBaseUrl": vaultBaseURL,
+	}
+
+	pathParameters := map[string]interface{}{
+		"certificate-name": autorest.Encode("path", certificateName),
+	}
+
+	const APIVersion = "7.1"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsPost(),
+		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
+		autorest.WithPathParameters("/deletedcertificates/{certificate-name}/recover", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// RecoverDeletedCertificateSender sends the RecoverDeletedCertificate request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) RecoverDeletedCertificateSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+}
+
+// RecoverDeletedCertificateResponder handles the response to the RecoverDeletedCertificate request. The method always
+// closes the http.Response Body.
+func (client BaseClient) RecoverDeletedCertificateResponder(resp *http.Response) (result CertificateBundle, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// RecoverDeletedKey the Recover Deleted Key operation is applicable for deleted keys in soft-delete enabled vaults. It
+// recovers the deleted key back to its latest version under /keys. An attempt to recover a non-deleted key will
+// return an error. Consider this the inverse of the delete operation on soft-delete enabled vaults.
This operation +// requires the keys/recover permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// keyName - the name of the deleted key. +func (client BaseClient) RecoverDeletedKey(ctx context.Context, vaultBaseURL string, keyName string) (result KeyBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedKey") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.RecoverDeletedKeyPreparer(ctx, vaultBaseURL, keyName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedKey", nil, "Failure preparing request") + return + } + + resp, err := client.RecoverDeletedKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedKey", resp, "Failure sending request") + return + } + + result, err = client.RecoverDeletedKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedKey", resp, "Failure responding to request") + return + } + + return +} + +// RecoverDeletedKeyPreparer prepares the RecoverDeletedKey request. +func (client BaseClient) RecoverDeletedKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "key-name": autorest.Encode("path", keyName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/deletedkeys/{key-name}/recover", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RecoverDeletedKeySender sends the RecoverDeletedKey request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) RecoverDeletedKeySender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// RecoverDeletedKeyResponder handles the response to the RecoverDeletedKey request. The method always +// closes the http.Response Body. +func (client BaseClient) RecoverDeletedKeyResponder(resp *http.Response) (result KeyBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RecoverDeletedSasDefinition recovers the deleted SAS definition for the specified storage account. This operation +// can only be performed on a soft-delete enabled vault. This operation requires the storage/recover permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. +// sasDefinitionName - the name of the SAS definition. 
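+
+// Usage sketch (illustrative, not generated code): recovering a soft-deleted
+// key back under /keys via RecoverDeletedKey above. Unlike purge, recover
+// returns the restored KeyBundle, so the recovered key identifier is
+// immediately available. The vault URL and key name are placeholders.
+//
+//	bundle, err := client.RecoverDeletedKey(ctx, vaultURL, "my-key")
+//	if err == nil && bundle.Key != nil && bundle.Key.Kid != nil {
+//		fmt.Println("recovered:", *bundle.Key.Kid)
+//	}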
+func (client BaseClient) RecoverDeletedSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (result SasDefinitionBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedSasDefinition") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: storageAccountName, + Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, + {TargetValue: sasDefinitionName, + Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "RecoverDeletedSasDefinition", err.Error()) + } + + req, err := client.RecoverDeletedSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSasDefinition", nil, "Failure preparing request") + return + } + + resp, err := client.RecoverDeletedSasDefinitionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSasDefinition", resp, "Failure sending request") + return + } + + result, err = client.RecoverDeletedSasDefinitionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSasDefinition", resp, "Failure responding to request") + return + } + + return +} + +// RecoverDeletedSasDefinitionPreparer prepares the RecoverDeletedSasDefinition request. +func (client BaseClient) RecoverDeletedSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "sas-definition-name": autorest.Encode("path", sasDefinitionName), + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/deletedstorage/{storage-account-name}/sas/{sas-definition-name}/recover", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RecoverDeletedSasDefinitionSender sends the RecoverDeletedSasDefinition request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) RecoverDeletedSasDefinitionSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// RecoverDeletedSasDefinitionResponder handles the response to the RecoverDeletedSasDefinition request. The method always +// closes the http.Response Body. 
+func (client BaseClient) RecoverDeletedSasDefinitionResponder(resp *http.Response) (result SasDefinitionBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RecoverDeletedSecret recovers the deleted secret in the specified vault. This operation can only be performed on a +// soft-delete enabled vault. This operation requires the secrets/recover permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// secretName - the name of the deleted secret. +func (client BaseClient) RecoverDeletedSecret(ctx context.Context, vaultBaseURL string, secretName string) (result SecretBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedSecret") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.RecoverDeletedSecretPreparer(ctx, vaultBaseURL, secretName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSecret", nil, "Failure preparing request") + return + } + + resp, err := client.RecoverDeletedSecretSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSecret", resp, "Failure sending request") + return + } + + result, err = client.RecoverDeletedSecretResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSecret", resp, "Failure responding to request") + return + } + + return +} + +// RecoverDeletedSecretPreparer prepares the RecoverDeletedSecret request. +func (client BaseClient) RecoverDeletedSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "secret-name": autorest.Encode("path", secretName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/deletedsecrets/{secret-name}/recover", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RecoverDeletedSecretSender sends the RecoverDeletedSecret request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) RecoverDeletedSecretSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// RecoverDeletedSecretResponder handles the response to the RecoverDeletedSecret request. The method always +// closes the http.Response Body. 
+func (client BaseClient) RecoverDeletedSecretResponder(resp *http.Response) (result SecretBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RecoverDeletedStorageAccount recovers the deleted storage account in the specified vault. This operation can only be +// performed on a soft-delete enabled vault. This operation requires the storage/recover permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. +func (client BaseClient) RecoverDeletedStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result StorageBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedStorageAccount") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: storageAccountName, + Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "RecoverDeletedStorageAccount", err.Error()) + } + + req, err := client.RecoverDeletedStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedStorageAccount", nil, "Failure preparing request") + return + } + + resp, err := client.RecoverDeletedStorageAccountSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedStorageAccount", resp, "Failure sending request") + return + } + + result, err = client.RecoverDeletedStorageAccountResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedStorageAccount", resp, "Failure responding to request") + return + } + + return +} + +// RecoverDeletedStorageAccountPreparer prepares the RecoverDeletedStorageAccount request. +func (client BaseClient) RecoverDeletedStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/deletedstorage/{storage-account-name}/recover", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RecoverDeletedStorageAccountSender sends the RecoverDeletedStorageAccount request. The method will close the +// http.Response Body if it receives an error. 
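+
+// Usage sketch (illustrative, not generated code): the secret counterpart of
+// the recover flow, using RecoverDeletedSecret above to get the restored
+// SecretBundle back. The vault must be soft-delete enabled and the caller
+// needs secrets/recover; the names are placeholders.
+//
+//	secret, err := client.RecoverDeletedSecret(ctx, vaultURL, "my-secret")
+//	if err == nil && secret.ID != nil {
+//		fmt.Println("recovered:", *secret.ID)
+//	}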
+func (client BaseClient) RecoverDeletedStorageAccountSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// RecoverDeletedStorageAccountResponder handles the response to the RecoverDeletedStorageAccount request. The method always +// closes the http.Response Body. +func (client BaseClient) RecoverDeletedStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RegenerateStorageAccountKey regenerates the specified key value for the given storage account. This operation +// requires the storage/regeneratekey permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. +// parameters - the parameters to regenerate storage account key. +func (client BaseClient) RegenerateStorageAccountKey(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountRegenerteKeyParameters) (result StorageBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RegenerateStorageAccountKey") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: storageAccountName, + Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.KeyName", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "RegenerateStorageAccountKey", err.Error()) + } + + req, err := client.RegenerateStorageAccountKeyPreparer(ctx, vaultBaseURL, storageAccountName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RegenerateStorageAccountKey", nil, "Failure preparing request") + return + } + + resp, err := client.RegenerateStorageAccountKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RegenerateStorageAccountKey", resp, "Failure sending request") + return + } + + result, err = client.RegenerateStorageAccountKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RegenerateStorageAccountKey", resp, "Failure responding to request") + return + } + + return +} + +// RegenerateStorageAccountKeyPreparer prepares the RegenerateStorageAccountKey request. 
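+
+// Usage sketch (illustrative, not generated code): the storage operations
+// validate their inputs client-side before a request is ever prepared.
+// Because storageAccountName must match ^[0-9a-zA-Z]+$, a hyphenated name
+// fails fast -- RecoverDeletedStorageAccount above returns a validation error
+// and no HTTP call is made. The names are placeholders.
+//
+//	_, err := client.RecoverDeletedStorageAccount(ctx, vaultURL, "not-a-valid-name")
+//	fmt.Println(err) // roughly: keyvault.BaseClient#RecoverDeletedStorageAccount: Invalid input: ...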
+func (client BaseClient) RegenerateStorageAccountKeyPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountRegenerteKeyParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/storage/{storage-account-name}/regeneratekey", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RegenerateStorageAccountKeySender sends the RegenerateStorageAccountKey request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) RegenerateStorageAccountKeySender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// RegenerateStorageAccountKeyResponder handles the response to the RegenerateStorageAccountKey request. The method always +// closes the http.Response Body. +func (client BaseClient) RegenerateStorageAccountKeyResponder(resp *http.Response) (result StorageBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RestoreCertificate restores a backed up certificate, and all its versions, to a vault. This operation requires the +// certificates/restore permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// parameters - the parameters to restore the certificate. 
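+
+// Usage sketch (illustrative, not generated code): rotating a managed storage
+// account key via RegenerateStorageAccountKey above. Note that
+// StorageAccountRegenerteKeyParameters is the type's real (misspelled) name
+// in this package and must be used as-is. For Azure Storage the key name is
+// typically "key1" or "key2"; all values here are placeholders.
+//
+//	bundle, err := client.RegenerateStorageAccountKey(ctx, vaultURL, "mystorage",
+//		keyvault.StorageAccountRegenerteKeyParameters{KeyName: to.StringPtr("key1")})
+//	_, _ = bundle, err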
+func (client BaseClient) RestoreCertificate(ctx context.Context, vaultBaseURL string, parameters CertificateRestoreParameters) (result CertificateBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RestoreCertificate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.CertificateBundleBackup", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "RestoreCertificate", err.Error()) + } + + req, err := client.RestoreCertificatePreparer(ctx, vaultBaseURL, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreCertificate", nil, "Failure preparing request") + return + } + + resp, err := client.RestoreCertificateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreCertificate", resp, "Failure sending request") + return + } + + result, err = client.RestoreCertificateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreCertificate", resp, "Failure responding to request") + return + } + + return +} + +// RestoreCertificatePreparer prepares the RestoreCertificate request. +func (client BaseClient) RestoreCertificatePreparer(ctx context.Context, vaultBaseURL string, parameters CertificateRestoreParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPath("/certificates/restore"), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RestoreCertificateSender sends the RestoreCertificate request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) RestoreCertificateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// RestoreCertificateResponder handles the response to the RestoreCertificate request. The method always +// closes the http.Response Body. +func (client BaseClient) RestoreCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RestoreKey imports a previously backed up key into Azure Key Vault, restoring the key, its key identifier, +// attributes and access control policies. The RESTORE operation may be used to import a previously backed up key. +// Individual versions of a key cannot be restored. The key is restored in its entirety with the same key name as it +// had when it was backed up. 
If the key name is not available in the target Key Vault, the RESTORE operation will be
+// rejected. While the key name is retained during restore, the final key identifier will change if the key is restored
+// to a different vault. Restore will restore all versions and preserve version identifiers. The RESTORE operation is
+// subject to security constraints: The target Key Vault must be owned by the same Microsoft Azure Subscription as the
+// source Key Vault. The user must have RESTORE permission in the target Key Vault. This operation requires the
+// keys/restore permission.
+// Parameters:
+// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
+// parameters - the parameters to restore the key.
+func (client BaseClient) RestoreKey(ctx context.Context, vaultBaseURL string, parameters KeyRestoreParameters) (result KeyBundle, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RestoreKey")
+		defer func() {
+			sc := -1
+			if result.Response.Response != nil {
+				sc = result.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	if err := validation.Validate([]validation.Validation{
+		{TargetValue: parameters,
+			Constraints: []validation.Constraint{{Target: "parameters.KeyBundleBackup", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+		return result, validation.NewError("keyvault.BaseClient", "RestoreKey", err.Error())
+	}
+
+	req, err := client.RestoreKeyPreparer(ctx, vaultBaseURL, parameters)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreKey", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.RestoreKeySender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreKey", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.RestoreKeyResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreKey", resp, "Failure responding to request")
+		return
+	}
+
+	return
+}
+
+// RestoreKeyPreparer prepares the RestoreKey request.
+func (client BaseClient) RestoreKeyPreparer(ctx context.Context, vaultBaseURL string, parameters KeyRestoreParameters) (*http.Request, error) {
+	urlParameters := map[string]interface{}{
+		"vaultBaseUrl": vaultBaseURL,
+	}
+
+	const APIVersion = "7.1"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsContentType("application/json; charset=utf-8"),
+		autorest.AsPost(),
+		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
+		autorest.WithPath("/keys/restore"),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// RestoreKeySender sends the RestoreKey request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) RestoreKeySender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+}
+
+// RestoreKeyResponder handles the response to the RestoreKey request. The method always
+// closes the http.Response Body.
+func (client BaseClient) RestoreKeyResponder(resp *http.Response) (result KeyBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RestoreSecret restores a backed up secret, and all its versions, to a vault. This operation requires the +// secrets/restore permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// parameters - the parameters to restore the secret. +func (client BaseClient) RestoreSecret(ctx context.Context, vaultBaseURL string, parameters SecretRestoreParameters) (result SecretBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RestoreSecret") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.SecretBundleBackup", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "RestoreSecret", err.Error()) + } + + req, err := client.RestoreSecretPreparer(ctx, vaultBaseURL, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreSecret", nil, "Failure preparing request") + return + } + + resp, err := client.RestoreSecretSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreSecret", resp, "Failure sending request") + return + } + + result, err = client.RestoreSecretResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreSecret", resp, "Failure responding to request") + return + } + + return +} + +// RestoreSecretPreparer prepares the RestoreSecret request. +func (client BaseClient) RestoreSecretPreparer(ctx context.Context, vaultBaseURL string, parameters SecretRestoreParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPath("/secrets/restore"), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RestoreSecretSender sends the RestoreSecret request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) RestoreSecretSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// RestoreSecretResponder handles the response to the RestoreSecret request. The method always +// closes the http.Response Body. 
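+
+// Usage sketch (illustrative, not generated code): the cross-vault migration
+// the RestoreKey documentation above describes -- back a key up in one vault
+// and restore it, all versions included, into another vault in the same
+// subscription. BackupKey is assumed to be the backup counterpart defined
+// elsewhere in this file; the vault URLs are placeholders.
+//
+//	backup, err := client.BackupKey(ctx, "https://source.vault.azure.net", "my-key")
+//	if err == nil {
+//		_, err = client.RestoreKey(ctx, "https://target.vault.azure.net",
+//			keyvault.KeyRestoreParameters{KeyBundleBackup: backup.Value})
+//	}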
+func (client BaseClient) RestoreSecretResponder(resp *http.Response) (result SecretBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RestoreStorageAccount restores a backed up storage account to a vault. This operation requires the storage/restore +// permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// parameters - the parameters to restore the storage account. +func (client BaseClient) RestoreStorageAccount(ctx context.Context, vaultBaseURL string, parameters StorageRestoreParameters) (result StorageBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RestoreStorageAccount") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.StorageBundleBackup", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "RestoreStorageAccount", err.Error()) + } + + req, err := client.RestoreStorageAccountPreparer(ctx, vaultBaseURL, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreStorageAccount", nil, "Failure preparing request") + return + } + + resp, err := client.RestoreStorageAccountSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreStorageAccount", resp, "Failure sending request") + return + } + + result, err = client.RestoreStorageAccountResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreStorageAccount", resp, "Failure responding to request") + return + } + + return +} + +// RestoreStorageAccountPreparer prepares the RestoreStorageAccount request. +func (client BaseClient) RestoreStorageAccountPreparer(ctx context.Context, vaultBaseURL string, parameters StorageRestoreParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPath("/storage/restore"), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RestoreStorageAccountSender sends the RestoreStorageAccount request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) RestoreStorageAccountSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// RestoreStorageAccountResponder handles the response to the RestoreStorageAccount request. The method always +// closes the http.Response Body. 
+func (client BaseClient) RestoreStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// SetCertificateContacts sets the certificate contacts for the specified key vault. This operation requires the +// certificates/managecontacts permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// contacts - the contacts for the key vault certificate. +func (client BaseClient) SetCertificateContacts(ctx context.Context, vaultBaseURL string, contacts Contacts) (result Contacts, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetCertificateContacts") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.SetCertificateContactsPreparer(ctx, vaultBaseURL, contacts) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateContacts", nil, "Failure preparing request") + return + } + + resp, err := client.SetCertificateContactsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateContacts", resp, "Failure sending request") + return + } + + result, err = client.SetCertificateContactsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateContacts", resp, "Failure responding to request") + return + } + + return +} + +// SetCertificateContactsPreparer prepares the SetCertificateContacts request. +func (client BaseClient) SetCertificateContactsPreparer(ctx context.Context, vaultBaseURL string, contacts Contacts) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + contacts.ID = nil + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPath("/certificates/contacts"), + autorest.WithJSON(contacts), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// SetCertificateContactsSender sends the SetCertificateContacts request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) SetCertificateContactsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// SetCertificateContactsResponder handles the response to the SetCertificateContacts request. The method always +// closes the http.Response Body. 
+func (client BaseClient) SetCertificateContactsResponder(resp *http.Response) (result Contacts, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// SetCertificateIssuer the SetCertificateIssuer operation adds or updates the specified certificate issuer. This +// operation requires the certificates/setissuers permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// issuerName - the name of the issuer. +// parameter - certificate issuer set parameter. +func (client BaseClient) SetCertificateIssuer(ctx context.Context, vaultBaseURL string, issuerName string, parameter CertificateIssuerSetParameters) (result IssuerBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetCertificateIssuer") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameter, + Constraints: []validation.Constraint{{Target: "parameter.Provider", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "SetCertificateIssuer", err.Error()) + } + + req, err := client.SetCertificateIssuerPreparer(ctx, vaultBaseURL, issuerName, parameter) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateIssuer", nil, "Failure preparing request") + return + } + + resp, err := client.SetCertificateIssuerSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateIssuer", resp, "Failure sending request") + return + } + + result, err = client.SetCertificateIssuerResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateIssuer", resp, "Failure responding to request") + return + } + + return +} + +// SetCertificateIssuerPreparer prepares the SetCertificateIssuer request. +func (client BaseClient) SetCertificateIssuerPreparer(ctx context.Context, vaultBaseURL string, issuerName string, parameter CertificateIssuerSetParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "issuer-name": autorest.Encode("path", issuerName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/certificates/issuers/{issuer-name}", pathParameters), + autorest.WithJSON(parameter), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// SetCertificateIssuerSender sends the SetCertificateIssuer request. The method will close the +// http.Response Body if it receives an error. 
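+
+// Usage sketch (illustrative, not generated code): registering certificate
+// expiry contacts with SetCertificateContacts above. The Preparer nils out
+// contacts.ID before serializing, so only ContactList needs to be populated;
+// the addresses are placeholders.
+//
+//	contactList := []keyvault.Contact{{
+//		EmailAddress: to.StringPtr("pki-team@example.com"),
+//		Name:         to.StringPtr("PKI Team"),
+//	}}
+//	_, err := client.SetCertificateContacts(ctx, vaultURL,
+//		keyvault.Contacts{ContactList: &contactList})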
+func (client BaseClient) SetCertificateIssuerSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// SetCertificateIssuerResponder handles the response to the SetCertificateIssuer request. The method always +// closes the http.Response Body. +func (client BaseClient) SetCertificateIssuerResponder(resp *http.Response) (result IssuerBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// SetSasDefinition creates or updates a new SAS definition for the specified storage account. This operation requires +// the storage/setsas permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. +// sasDefinitionName - the name of the SAS definition. +// parameters - the parameters to create a SAS definition. +func (client BaseClient) SetSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string, parameters SasDefinitionCreateParameters) (result SasDefinitionBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetSasDefinition") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: storageAccountName, + Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, + {TargetValue: sasDefinitionName, + Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.TemplateURI", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.ValidityPeriod", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "SetSasDefinition", err.Error()) + } + + req, err := client.SetSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSasDefinition", nil, "Failure preparing request") + return + } + + resp, err := client.SetSasDefinitionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSasDefinition", resp, "Failure sending request") + return + } + + result, err = client.SetSasDefinitionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSasDefinition", resp, "Failure responding to request") + return + } + + return +} + +// SetSasDefinitionPreparer prepares the SetSasDefinition request. 
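+
+// Usage sketch (illustrative, not generated code): registering a certificate
+// issuer with SetCertificateIssuer above. Provider is the only field checked
+// client-side (the Null rule); the provider name and credentials are
+// placeholders.
+//
+//	issuer, err := client.SetCertificateIssuer(ctx, vaultURL, "my-issuer",
+//		keyvault.CertificateIssuerSetParameters{
+//			Provider: to.StringPtr("DigiCert"),
+//			Credentials: &keyvault.IssuerCredentials{
+//				AccountID: to.StringPtr("<account id>"),
+//				Password:  to.StringPtr("<api key>"),
+//			},
+//		})
+//	_, _ = issuer, err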
+func (client BaseClient) SetSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string, parameters SasDefinitionCreateParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "sas-definition-name": autorest.Encode("path", sasDefinitionName), + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/storage/{storage-account-name}/sas/{sas-definition-name}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// SetSasDefinitionSender sends the SetSasDefinition request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) SetSasDefinitionSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// SetSasDefinitionResponder handles the response to the SetSasDefinition request. The method always +// closes the http.Response Body. +func (client BaseClient) SetSasDefinitionResponder(resp *http.Response) (result SasDefinitionBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// SetSecret the SET operation adds a secret to the Azure Key Vault. If the named secret already exists, Azure Key +// Vault creates a new version of that secret. This operation requires the secrets/set permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// secretName - the name of the secret. +// parameters - the parameters for setting the secret. 
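+//
+// Example (editorial sketch, not emitted by the code generator): a caller with
+// an authorized BaseClient might set a secret as follows; the vault URL, secret
+// name and value, and the authorizer variable are placeholders.
+//
+//	client := keyvault.New()
+//	client.Authorizer = authorizer // e.g. from the keyvault auth helper package
+//	bundle, err := client.SetSecret(context.Background(),
+//		"https://myvault.vault.azure.net", "my-secret",
+//		keyvault.SecretSetParameters{Value: to.StringPtr("s3cr3t-value")})
+//	if err != nil {
+//		// handle the request/validation error
+//	}
+//	_ = bundle // SecretBundle carries the new secret version's ID and attributes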
+func (client BaseClient) SetSecret(ctx context.Context, vaultBaseURL string, secretName string, parameters SecretSetParameters) (result SecretBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetSecret") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: secretName, + Constraints: []validation.Constraint{{Target: "secretName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "SetSecret", err.Error()) + } + + req, err := client.SetSecretPreparer(ctx, vaultBaseURL, secretName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSecret", nil, "Failure preparing request") + return + } + + resp, err := client.SetSecretSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSecret", resp, "Failure sending request") + return + } + + result, err = client.SetSecretResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSecret", resp, "Failure responding to request") + return + } + + return +} + +// SetSecretPreparer prepares the SetSecret request. +func (client BaseClient) SetSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string, parameters SecretSetParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "secret-name": autorest.Encode("path", secretName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/secrets/{secret-name}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// SetSecretSender sends the SetSecret request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) SetSecretSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// SetSecretResponder handles the response to the SetSecret request. The method always +// closes the http.Response Body. +func (client BaseClient) SetSecretResponder(resp *http.Response) (result SecretBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// SetStorageAccount creates or updates a new storage account. This operation requires the storage/set permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. 
+// parameters - the parameters to create a storage account. +func (client BaseClient) SetStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountCreateParameters) (result StorageBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetStorageAccount") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: storageAccountName, + Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.ResourceID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.ActiveKeyName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.AutoRegenerateKey", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "SetStorageAccount", err.Error()) + } + + req, err := client.SetStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetStorageAccount", nil, "Failure preparing request") + return + } + + resp, err := client.SetStorageAccountSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetStorageAccount", resp, "Failure sending request") + return + } + + result, err = client.SetStorageAccountResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetStorageAccount", resp, "Failure responding to request") + return + } + + return +} + +// SetStorageAccountPreparer prepares the SetStorageAccount request. +func (client BaseClient) SetStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountCreateParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/storage/{storage-account-name}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// SetStorageAccountSender sends the SetStorageAccount request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) SetStorageAccountSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// SetStorageAccountResponder handles the response to the SetStorageAccount request. The method always +// closes the http.Response Body. 
+func (client BaseClient) SetStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Sign the SIGN operation is applicable to asymmetric and symmetric keys stored in Azure Key Vault since this +// operation uses the private portion of the key. This operation requires the keys/sign permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// keyName - the name of the key. +// keyVersion - the version of the key. +// parameters - the parameters for the signing operation. +func (client BaseClient) Sign(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeySignParameters) (result KeyOperationResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.Sign") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "Sign", err.Error()) + } + + req, err := client.SignPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Sign", nil, "Failure preparing request") + return + } + + resp, err := client.SignSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Sign", resp, "Failure sending request") + return + } + + result, err = client.SignResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Sign", resp, "Failure responding to request") + return + } + + return +} + +// SignPreparer prepares the Sign request. +func (client BaseClient) SignPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeySignParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "key-name": autorest.Encode("path", keyName), + "key-version": autorest.Encode("path", keyVersion), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/keys/{key-name}/{key-version}/sign", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// SignSender sends the Sign request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) SignSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// SignResponder handles the response to the Sign request. 
The method always +// closes the http.Response Body. +func (client BaseClient) SignResponder(resp *http.Response) (result KeyOperationResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// UnwrapKey the UNWRAP operation supports decryption of a symmetric key using the target key encryption key. This +// operation is the reverse of the WRAP operation. The UNWRAP operation applies to asymmetric and symmetric keys stored +// in Azure Key Vault since it uses the private portion of the key. This operation requires the keys/unwrapKey +// permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// keyName - the name of the key. +// keyVersion - the version of the key. +// parameters - the parameters for the key operation. +func (client BaseClient) UnwrapKey(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (result KeyOperationResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UnwrapKey") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "UnwrapKey", err.Error()) + } + + req, err := client.UnwrapKeyPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UnwrapKey", nil, "Failure preparing request") + return + } + + resp, err := client.UnwrapKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UnwrapKey", resp, "Failure sending request") + return + } + + result, err = client.UnwrapKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UnwrapKey", resp, "Failure responding to request") + return + } + + return +} + +// UnwrapKeyPreparer prepares the UnwrapKey request. +func (client BaseClient) UnwrapKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "key-name": autorest.Encode("path", keyName), + "key-version": autorest.Encode("path", keyVersion), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/keys/{key-name}/{key-version}/unwrapkey", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UnwrapKeySender sends the UnwrapKey request. The method will close the +// http.Response Body if it receives an error. 
+func (client BaseClient) UnwrapKeySender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// UnwrapKeyResponder handles the response to the UnwrapKey request. The method always +// closes the http.Response Body. +func (client BaseClient) UnwrapKeyResponder(resp *http.Response) (result KeyOperationResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// UpdateCertificate the UpdateCertificate operation applies the specified update on the given certificate; the only +// elements updated are the certificate's attributes. This operation requires the certificates/update permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// certificateName - the name of the certificate in the given key vault. +// certificateVersion - the version of the certificate. +// parameters - the parameters for certificate update. +func (client BaseClient) UpdateCertificate(ctx context.Context, vaultBaseURL string, certificateName string, certificateVersion string, parameters CertificateUpdateParameters) (result CertificateBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateCertificate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdateCertificatePreparer(ctx, vaultBaseURL, certificateName, certificateVersion, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificate", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateCertificateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificate", resp, "Failure sending request") + return + } + + result, err = client.UpdateCertificateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificate", resp, "Failure responding to request") + return + } + + return +} + +// UpdateCertificatePreparer prepares the UpdateCertificate request. 
+func (client BaseClient) UpdateCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, certificateVersion string, parameters CertificateUpdateParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "certificate-name": autorest.Encode("path", certificateName), + "certificate-version": autorest.Encode("path", certificateVersion), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/certificates/{certificate-name}/{certificate-version}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateCertificateSender sends the UpdateCertificate request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) UpdateCertificateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// UpdateCertificateResponder handles the response to the UpdateCertificate request. The method always +// closes the http.Response Body. +func (client BaseClient) UpdateCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// UpdateCertificateIssuer the UpdateCertificateIssuer operation performs an update on the specified certificate issuer +// entity. This operation requires the certificates/setissuers permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// issuerName - the name of the issuer. +// parameter - certificate issuer update parameter. +func (client BaseClient) UpdateCertificateIssuer(ctx context.Context, vaultBaseURL string, issuerName string, parameter CertificateIssuerUpdateParameters) (result IssuerBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateCertificateIssuer") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdateCertificateIssuerPreparer(ctx, vaultBaseURL, issuerName, parameter) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateIssuer", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateCertificateIssuerSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateIssuer", resp, "Failure sending request") + return + } + + result, err = client.UpdateCertificateIssuerResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateIssuer", resp, "Failure responding to request") + return + } + + return +} + +// UpdateCertificateIssuerPreparer prepares the UpdateCertificateIssuer request. 
+func (client BaseClient) UpdateCertificateIssuerPreparer(ctx context.Context, vaultBaseURL string, issuerName string, parameter CertificateIssuerUpdateParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "issuer-name": autorest.Encode("path", issuerName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/certificates/issuers/{issuer-name}", pathParameters), + autorest.WithJSON(parameter), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateCertificateIssuerSender sends the UpdateCertificateIssuer request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) UpdateCertificateIssuerSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// UpdateCertificateIssuerResponder handles the response to the UpdateCertificateIssuer request. The method always +// closes the http.Response Body. +func (client BaseClient) UpdateCertificateIssuerResponder(resp *http.Response) (result IssuerBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// UpdateCertificateOperation updates a certificate creation operation that is already in progress. This operation +// requires the certificates/update permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// certificateName - the name of the certificate. +// certificateOperation - the certificate operation response. +func (client BaseClient) UpdateCertificateOperation(ctx context.Context, vaultBaseURL string, certificateName string, certificateOperation CertificateOperationUpdateParameter) (result CertificateOperation, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateCertificateOperation") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdateCertificateOperationPreparer(ctx, vaultBaseURL, certificateName, certificateOperation) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateOperation", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateCertificateOperationSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateOperation", resp, "Failure sending request") + return + } + + result, err = client.UpdateCertificateOperationResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateOperation", resp, "Failure responding to request") + return + } + + return +} + +// UpdateCertificateOperationPreparer prepares the UpdateCertificateOperation request. 
+func (client BaseClient) UpdateCertificateOperationPreparer(ctx context.Context, vaultBaseURL string, certificateName string, certificateOperation CertificateOperationUpdateParameter) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "certificate-name": autorest.Encode("path", certificateName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/certificates/{certificate-name}/pending", pathParameters), + autorest.WithJSON(certificateOperation), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateCertificateOperationSender sends the UpdateCertificateOperation request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) UpdateCertificateOperationSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// UpdateCertificateOperationResponder handles the response to the UpdateCertificateOperation request. The method always +// closes the http.Response Body. +func (client BaseClient) UpdateCertificateOperationResponder(resp *http.Response) (result CertificateOperation, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// UpdateCertificatePolicy set specified members in the certificate policy. Leave others as null. This operation +// requires the certificates/update permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// certificateName - the name of the certificate in the given vault. +// certificatePolicy - the policy for the certificate. +func (client BaseClient) UpdateCertificatePolicy(ctx context.Context, vaultBaseURL string, certificateName string, certificatePolicy CertificatePolicy) (result CertificatePolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateCertificatePolicy") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdateCertificatePolicyPreparer(ctx, vaultBaseURL, certificateName, certificatePolicy) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificatePolicy", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateCertificatePolicySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificatePolicy", resp, "Failure sending request") + return + } + + result, err = client.UpdateCertificatePolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificatePolicy", resp, "Failure responding to request") + return + } + + return +} + +// UpdateCertificatePolicyPreparer prepares the UpdateCertificatePolicy request. 
+func (client BaseClient) UpdateCertificatePolicyPreparer(ctx context.Context, vaultBaseURL string, certificateName string, certificatePolicy CertificatePolicy) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "certificate-name": autorest.Encode("path", certificateName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + certificatePolicy.ID = nil + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/certificates/{certificate-name}/policy", pathParameters), + autorest.WithJSON(certificatePolicy), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateCertificatePolicySender sends the UpdateCertificatePolicy request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) UpdateCertificatePolicySender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// UpdateCertificatePolicyResponder handles the response to the UpdateCertificatePolicy request. The method always +// closes the http.Response Body. +func (client BaseClient) UpdateCertificatePolicyResponder(resp *http.Response) (result CertificatePolicy, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// UpdateKey in order to perform this operation, the key must already exist in the Key Vault. Note: The cryptographic +// material of a key itself cannot be changed. This operation requires the keys/update permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// keyName - the name of key to update. +// keyVersion - the version of the key to update. +// parameters - the parameters of the key to update. +func (client BaseClient) UpdateKey(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyUpdateParameters) (result KeyBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateKey") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdateKeyPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateKey", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateKey", resp, "Failure sending request") + return + } + + result, err = client.UpdateKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateKey", resp, "Failure responding to request") + return + } + + return +} + +// UpdateKeyPreparer prepares the UpdateKey request. 
+func (client BaseClient) UpdateKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyUpdateParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "key-name": autorest.Encode("path", keyName), + "key-version": autorest.Encode("path", keyVersion), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/keys/{key-name}/{key-version}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateKeySender sends the UpdateKey request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) UpdateKeySender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// UpdateKeyResponder handles the response to the UpdateKey request. The method always +// closes the http.Response Body. +func (client BaseClient) UpdateKeyResponder(resp *http.Response) (result KeyBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// UpdateSasDefinition updates the specified attributes associated with the given SAS definition. This operation +// requires the storage/setsas permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. +// sasDefinitionName - the name of the SAS definition. +// parameters - the parameters to update a SAS definition. 
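+//
+// Example (editorial sketch, not emitted by the code generator): updating the
+// template URI and validity period of an existing SAS definition; the account
+// and definition names, the SAS template string, and the authorizer variable
+// are placeholders.
+//
+//	client := keyvault.New()
+//	client.Authorizer = authorizer
+//	bundle, err := client.UpdateSasDefinition(context.Background(),
+//		"https://myvault.vault.azure.net", "storage1", "sas1",
+//		keyvault.SasDefinitionUpdateParameters{
+//			TemplateURI:    to.StringPtr("sv=2018-03-28&ss=b&srt=sco&sp=rw"),
+//			ValidityPeriod: to.StringPtr("PT2H"), // ISO-8601 duration
+//		})
+//	if err != nil {
+//		// handle the request/validation error
+//	}
+//	_ = bundle // SasDefinitionBundle describes the updated definition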
+func (client BaseClient) UpdateSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string, parameters SasDefinitionUpdateParameters) (result SasDefinitionBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateSasDefinition") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: storageAccountName, + Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, + {TargetValue: sasDefinitionName, + Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "UpdateSasDefinition", err.Error()) + } + + req, err := client.UpdateSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSasDefinition", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSasDefinitionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSasDefinition", resp, "Failure sending request") + return + } + + result, err = client.UpdateSasDefinitionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSasDefinition", resp, "Failure responding to request") + return + } + + return +} + +// UpdateSasDefinitionPreparer prepares the UpdateSasDefinition request. +func (client BaseClient) UpdateSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string, parameters SasDefinitionUpdateParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "sas-definition-name": autorest.Encode("path", sasDefinitionName), + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/storage/{storage-account-name}/sas/{sas-definition-name}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSasDefinitionSender sends the UpdateSasDefinition request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) UpdateSasDefinitionSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// UpdateSasDefinitionResponder handles the response to the UpdateSasDefinition request. The method always +// closes the http.Response Body. 
+func (client BaseClient) UpdateSasDefinitionResponder(resp *http.Response) (result SasDefinitionBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// UpdateSecret the UPDATE operation changes specified attributes of an existing stored secret. Attributes that are not +// specified in the request are left unchanged. The value of a secret itself cannot be changed. This operation requires +// the secrets/set permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// secretName - the name of the secret. +// secretVersion - the version of the secret. +// parameters - the parameters for update secret operation. +func (client BaseClient) UpdateSecret(ctx context.Context, vaultBaseURL string, secretName string, secretVersion string, parameters SecretUpdateParameters) (result SecretBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateSecret") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdateSecretPreparer(ctx, vaultBaseURL, secretName, secretVersion, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSecret", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSecretSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSecret", resp, "Failure sending request") + return + } + + result, err = client.UpdateSecretResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSecret", resp, "Failure responding to request") + return + } + + return +} + +// UpdateSecretPreparer prepares the UpdateSecret request. +func (client BaseClient) UpdateSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string, secretVersion string, parameters SecretUpdateParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "secret-name": autorest.Encode("path", secretName), + "secret-version": autorest.Encode("path", secretVersion), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/secrets/{secret-name}/{secret-version}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSecretSender sends the UpdateSecret request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) UpdateSecretSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// UpdateSecretResponder handles the response to the UpdateSecret request. The method always +// closes the http.Response Body. 
+func (client BaseClient) UpdateSecretResponder(resp *http.Response) (result SecretBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// UpdateStorageAccount updates the specified attributes associated with the given storage account. This operation +// requires the storage/set/update permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. +// parameters - the parameters to update a storage account. +func (client BaseClient) UpdateStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountUpdateParameters) (result StorageBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateStorageAccount") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: storageAccountName, + Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "UpdateStorageAccount", err.Error()) + } + + req, err := client.UpdateStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateStorageAccount", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateStorageAccountSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateStorageAccount", resp, "Failure sending request") + return + } + + result, err = client.UpdateStorageAccountResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateStorageAccount", resp, "Failure responding to request") + return + } + + return +} + +// UpdateStorageAccountPreparer prepares the UpdateStorageAccount request. +func (client BaseClient) UpdateStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountUpdateParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/storage/{storage-account-name}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateStorageAccountSender sends the UpdateStorageAccount request. The method will close the +// http.Response Body if it receives an error. 
+func (client BaseClient) UpdateStorageAccountSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// UpdateStorageAccountResponder handles the response to the UpdateStorageAccount request. The method always +// closes the http.Response Body. +func (client BaseClient) UpdateStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Verify the VERIFY operation is applicable to symmetric keys stored in Azure Key Vault. VERIFY is not strictly +// necessary for asymmetric keys stored in Azure Key Vault since signature verification can be performed using the +// public portion of the key but this operation is supported as a convenience for callers that only have a +// key-reference and not the public portion of the key. This operation requires the keys/verify permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// keyName - the name of the key. +// keyVersion - the version of the key. +// parameters - the parameters for verify operations. +func (client BaseClient) Verify(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyVerifyParameters) (result KeyVerifyResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.Verify") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Digest", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.Signature", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "Verify", err.Error()) + } + + req, err := client.VerifyPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Verify", nil, "Failure preparing request") + return + } + + resp, err := client.VerifySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Verify", resp, "Failure sending request") + return + } + + result, err = client.VerifyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Verify", resp, "Failure responding to request") + return + } + + return +} + +// VerifyPreparer prepares the Verify request. 
+func (client BaseClient) VerifyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyVerifyParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "key-name": autorest.Encode("path", keyName), + "key-version": autorest.Encode("path", keyVersion), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/keys/{key-name}/{key-version}/verify", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// VerifySender sends the Verify request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) VerifySender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// VerifyResponder handles the response to the Verify request. The method always +// closes the http.Response Body. +func (client BaseClient) VerifyResponder(resp *http.Response) (result KeyVerifyResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// WrapKey the WRAP operation supports encryption of a symmetric key using a key encryption key that has previously +// been stored in an Azure Key Vault. The WRAP operation is only strictly necessary for symmetric keys stored in Azure +// Key Vault since protection with an asymmetric key can be performed using the public portion of the key. This +// operation is supported for asymmetric keys as a convenience for callers that have a key-reference but do not have +// access to the public key material. This operation requires the keys/wrapKey permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// keyName - the name of the key. +// keyVersion - the version of the key. +// parameters - the parameters for wrap operation. 
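+//
+// Example (editorial sketch, not emitted by the code generator): wrapping
+// base64url-encoded symmetric key material with a vault RSA key. The key name,
+// the encodedKeyMaterial variable, and the authorizer variable are
+// placeholders, and passing an empty keyVersion is assumed to address the
+// latest version of the key.
+//
+//	client := keyvault.New()
+//	client.Authorizer = authorizer
+//	result, err := client.WrapKey(context.Background(),
+//		"https://myvault.vault.azure.net", "wrapping-key", "",
+//		keyvault.KeyOperationsParameters{
+//			Algorithm: keyvault.RSAOAEP256,
+//			Value:     to.StringPtr(encodedKeyMaterial),
+//		})
+//	if err != nil {
+//		// handle the request/validation error
+//	}
+//	// result.Result holds the wrapped key; UnwrapKey reverses the operation.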
+func (client BaseClient) WrapKey(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (result KeyOperationResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.WrapKey") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "WrapKey", err.Error()) + } + + req, err := client.WrapKeyPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "WrapKey", nil, "Failure preparing request") + return + } + + resp, err := client.WrapKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "WrapKey", resp, "Failure sending request") + return + } + + result, err = client.WrapKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "WrapKey", resp, "Failure responding to request") + return + } + + return +} + +// WrapKeyPreparer prepares the WrapKey request. +func (client BaseClient) WrapKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "key-name": autorest.Encode("path", keyName), + "key-version": autorest.Encode("path", keyVersion), + } + + const APIVersion = "7.1" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/keys/{key-name}/{key-version}/wrapkey", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// WrapKeySender sends the WrapKey request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) WrapKeySender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// WrapKeyResponder handles the response to the WrapKey request. The method always +// closes the http.Response Body. 
+func (client BaseClient) WrapKeyResponder(resp *http.Response) (result KeyOperationResult, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/dataplane_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/dataplane_meta.json
new file mode 100644
index 00000000000..311f23dc3c4
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/dataplane_meta.json
@@ -0,0 +1,11 @@
+{
+  "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82",
+  "readme": "/_/azure-rest-api-specs/specification/keyvault/data-plane/readme.md",
+  "tag": "package-7.1",
+  "use": "@microsoft.azure/autorest.go@2.1.183",
+  "repository_url": "https://github.com/Azure/azure-rest-api-specs.git",
+  "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.183 --tag=package-7.1 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/keyvault/data-plane/readme.md",
+  "additional_properties": {
+    "additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION"
+  }
+}
\ No newline at end of file
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/enums.go
new file mode 100644
index 00000000000..5c4dbbcec28
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/enums.go
@@ -0,0 +1,231 @@
+package keyvault
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+// ActionType enumerates the values for action type.
+type ActionType string
+
+const (
+	// AutoRenew ...
+	AutoRenew ActionType = "AutoRenew"
+	// EmailContacts ...
+	EmailContacts ActionType = "EmailContacts"
+)
+
+// PossibleActionTypeValues returns an array of possible values for the ActionType const type.
+func PossibleActionTypeValues() []ActionType {
+	return []ActionType{AutoRenew, EmailContacts}
+}
+
+// DeletionRecoveryLevel enumerates the values for deletion recovery level.
+type DeletionRecoveryLevel string
+
+const (
+	// CustomizedRecoverable Denotes a vault state in which deletion is recoverable without the possibility for
+	// immediate and permanent deletion (i.e. purge when 7 <= SoftDeleteRetentionInDays < 90). This level
+	// guarantees the recoverability of the deleted entity during the retention interval and while the
+	// subscription is still available.
+	CustomizedRecoverable DeletionRecoveryLevel = "CustomizedRecoverable"
+	// CustomizedRecoverableProtectedSubscription Denotes a vault and subscription state in which deletion is
+	// recoverable, immediate and permanent deletion (i.e. purge) is not permitted, and in which the
+	// subscription itself cannot be permanently canceled when 7 <= SoftDeleteRetentionInDays < 90. This level
+	// guarantees the recoverability of the deleted entity during the retention interval, and also reflects the
+	// fact that the subscription itself cannot be cancelled.
+	CustomizedRecoverableProtectedSubscription DeletionRecoveryLevel = "CustomizedRecoverable+ProtectedSubscription"
+	// CustomizedRecoverablePurgeable Denotes a vault state in which deletion is recoverable, and which also
+	// permits immediate and permanent deletion (i.e. purge when 7 <= SoftDeleteRetentionInDays < 90). This
+	// level guarantees the recoverability of the deleted entity during the retention interval, unless a Purge
+	// operation is requested, or the subscription is cancelled.
+	CustomizedRecoverablePurgeable DeletionRecoveryLevel = "CustomizedRecoverable+Purgeable"
+	// Purgeable Denotes a vault state in which deletion is an irreversible operation, without the possibility
+	// for recovery. This level corresponds to no protection being available against a Delete operation; the
+	// data is irretrievably lost upon accepting a Delete operation at the entity level or higher (vault,
+	// resource group, subscription etc.)
+	Purgeable DeletionRecoveryLevel = "Purgeable"
+	// Recoverable Denotes a vault state in which deletion is recoverable without the possibility for immediate
+	// and permanent deletion (i.e. purge). This level guarantees the recoverability of the deleted entity
+	// during the retention interval (90 days) and while the subscription is still available. The system will
+	// permanently delete it after 90 days, if not recovered.
+	Recoverable DeletionRecoveryLevel = "Recoverable"
+	// RecoverableProtectedSubscription Denotes a vault and subscription state in which deletion is recoverable
+	// within the retention interval (90 days), immediate and permanent deletion (i.e. purge) is not permitted,
+	// and in which the subscription itself cannot be permanently canceled. The system will permanently delete
+	// it after 90 days, if not recovered.
+	RecoverableProtectedSubscription DeletionRecoveryLevel = "Recoverable+ProtectedSubscription"
+	// RecoverablePurgeable Denotes a vault state in which deletion is recoverable, and which also permits
+	// immediate and permanent deletion (i.e. purge). This level guarantees the recoverability of the deleted
+	// entity during the retention interval (90 days), unless a Purge operation is requested, or the
+	// subscription is cancelled. The system will permanently delete it after 90 days, if not recovered.
+	RecoverablePurgeable DeletionRecoveryLevel = "Recoverable+Purgeable"
+)
+
+// PossibleDeletionRecoveryLevelValues returns an array of possible values for the DeletionRecoveryLevel const type.
+func PossibleDeletionRecoveryLevelValues() []DeletionRecoveryLevel {
+	return []DeletionRecoveryLevel{CustomizedRecoverable, CustomizedRecoverableProtectedSubscription, CustomizedRecoverablePurgeable, Purgeable, Recoverable, RecoverableProtectedSubscription, RecoverablePurgeable}
+}
+
+// JSONWebKeyCurveName enumerates the values for json web key curve name.
+type JSONWebKeyCurveName string
+
+const (
+	// P256 ...
+	P256 JSONWebKeyCurveName = "P-256"
+	// P256K ...
+	P256K JSONWebKeyCurveName = "P-256K"
+	// P384 ...
+	P384 JSONWebKeyCurveName = "P-384"
+	// P521 ...
+	P521 JSONWebKeyCurveName = "P-521"
+)
+
+// PossibleJSONWebKeyCurveNameValues returns an array of possible values for the JSONWebKeyCurveName const type.
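+//
+// Only the NIST curves above have crypto/elliptic equivalents in the Go
+// standard library; P-256K (secp256k1) needs a third-party implementation. A
+// hypothetical helper a verifier might write (assuming "crypto/elliptic" and
+// "fmt" are imported):
+//
+//	func curveFor(name JSONWebKeyCurveName) (elliptic.Curve, error) {
+//		switch name {
+//		case P256:
+//			return elliptic.P256(), nil
+//		case P384:
+//			return elliptic.P384(), nil
+//		case P521:
+//			return elliptic.P521(), nil
+//		default:
+//			return nil, fmt.Errorf("unsupported curve %q", name)
+//		}
+//	}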
+func PossibleJSONWebKeyCurveNameValues() []JSONWebKeyCurveName { + return []JSONWebKeyCurveName{P256, P256K, P384, P521} +} + +// JSONWebKeyEncryptionAlgorithm enumerates the values for json web key encryption algorithm. +type JSONWebKeyEncryptionAlgorithm string + +const ( + // RSA15 ... + RSA15 JSONWebKeyEncryptionAlgorithm = "RSA1_5" + // RSAOAEP ... + RSAOAEP JSONWebKeyEncryptionAlgorithm = "RSA-OAEP" + // RSAOAEP256 ... + RSAOAEP256 JSONWebKeyEncryptionAlgorithm = "RSA-OAEP-256" +) + +// PossibleJSONWebKeyEncryptionAlgorithmValues returns an array of possible values for the JSONWebKeyEncryptionAlgorithm const type. +func PossibleJSONWebKeyEncryptionAlgorithmValues() []JSONWebKeyEncryptionAlgorithm { + return []JSONWebKeyEncryptionAlgorithm{RSA15, RSAOAEP, RSAOAEP256} +} + +// JSONWebKeyOperation enumerates the values for json web key operation. +type JSONWebKeyOperation string + +const ( + // Decrypt ... + Decrypt JSONWebKeyOperation = "decrypt" + // Encrypt ... + Encrypt JSONWebKeyOperation = "encrypt" + // Import ... + Import JSONWebKeyOperation = "import" + // Sign ... + Sign JSONWebKeyOperation = "sign" + // UnwrapKey ... + UnwrapKey JSONWebKeyOperation = "unwrapKey" + // Verify ... + Verify JSONWebKeyOperation = "verify" + // WrapKey ... + WrapKey JSONWebKeyOperation = "wrapKey" +) + +// PossibleJSONWebKeyOperationValues returns an array of possible values for the JSONWebKeyOperation const type. +func PossibleJSONWebKeyOperationValues() []JSONWebKeyOperation { + return []JSONWebKeyOperation{Decrypt, Encrypt, Import, Sign, UnwrapKey, Verify, WrapKey} +} + +// JSONWebKeySignatureAlgorithm enumerates the values for json web key signature algorithm. +type JSONWebKeySignatureAlgorithm string + +const ( + // ES256 ECDSA using P-256 and SHA-256, as described in https://tools.ietf.org/html/rfc7518. + ES256 JSONWebKeySignatureAlgorithm = "ES256" + // ES256K ECDSA using P-256K and SHA-256, as described in https://tools.ietf.org/html/rfc7518 + ES256K JSONWebKeySignatureAlgorithm = "ES256K" + // ES384 ECDSA using P-384 and SHA-384, as described in https://tools.ietf.org/html/rfc7518 + ES384 JSONWebKeySignatureAlgorithm = "ES384" + // ES512 ECDSA using P-521 and SHA-512, as described in https://tools.ietf.org/html/rfc7518 + ES512 JSONWebKeySignatureAlgorithm = "ES512" + // PS256 RSASSA-PSS using SHA-256 and MGF1 with SHA-256, as described in + // https://tools.ietf.org/html/rfc7518 + PS256 JSONWebKeySignatureAlgorithm = "PS256" + // PS384 RSASSA-PSS using SHA-384 and MGF1 with SHA-384, as described in + // https://tools.ietf.org/html/rfc7518 + PS384 JSONWebKeySignatureAlgorithm = "PS384" + // PS512 RSASSA-PSS using SHA-512 and MGF1 with SHA-512, as described in + // https://tools.ietf.org/html/rfc7518 + PS512 JSONWebKeySignatureAlgorithm = "PS512" + // RS256 RSASSA-PKCS1-v1_5 using SHA-256, as described in https://tools.ietf.org/html/rfc7518 + RS256 JSONWebKeySignatureAlgorithm = "RS256" + // RS384 RSASSA-PKCS1-v1_5 using SHA-384, as described in https://tools.ietf.org/html/rfc7518 + RS384 JSONWebKeySignatureAlgorithm = "RS384" + // RS512 RSASSA-PKCS1-v1_5 using SHA-512, as described in https://tools.ietf.org/html/rfc7518 + RS512 JSONWebKeySignatureAlgorithm = "RS512" + // RSNULL Reserved + RSNULL JSONWebKeySignatureAlgorithm = "RSNULL" +) + +// PossibleJSONWebKeySignatureAlgorithmValues returns an array of possible values for the JSONWebKeySignatureAlgorithm const type. 
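+//
+// Per RFC 7518 each algorithm above is bound to a fixed digest; a sketch of
+// one plausible mapping a verifier could use (hypothetical helper, assuming
+// "crypto" and "fmt" are imported):
+//
+//	func hashFor(alg JSONWebKeySignatureAlgorithm) (crypto.Hash, error) {
+//		switch alg {
+//		case ES256, ES256K, PS256, RS256:
+//			return crypto.SHA256, nil
+//		case ES384, PS384, RS384:
+//			return crypto.SHA384, nil
+//		case ES512, PS512, RS512:
+//			return crypto.SHA512, nil
+//		default:
+//			return 0, fmt.Errorf("unsupported signature algorithm %q", alg)
+//		}
+//	}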
+func PossibleJSONWebKeySignatureAlgorithmValues() []JSONWebKeySignatureAlgorithm { + return []JSONWebKeySignatureAlgorithm{ES256, ES256K, ES384, ES512, PS256, PS384, PS512, RS256, RS384, RS512, RSNULL} +} + +// JSONWebKeyType enumerates the values for json web key type. +type JSONWebKeyType string + +const ( + // EC ... + EC JSONWebKeyType = "EC" + // ECHSM ... + ECHSM JSONWebKeyType = "EC-HSM" + // Oct ... + Oct JSONWebKeyType = "oct" + // RSA ... + RSA JSONWebKeyType = "RSA" + // RSAHSM ... + RSAHSM JSONWebKeyType = "RSA-HSM" +) + +// PossibleJSONWebKeyTypeValues returns an array of possible values for the JSONWebKeyType const type. +func PossibleJSONWebKeyTypeValues() []JSONWebKeyType { + return []JSONWebKeyType{EC, ECHSM, Oct, RSA, RSAHSM} +} + +// KeyUsageType enumerates the values for key usage type. +type KeyUsageType string + +const ( + // CRLSign ... + CRLSign KeyUsageType = "cRLSign" + // DataEncipherment ... + DataEncipherment KeyUsageType = "dataEncipherment" + // DecipherOnly ... + DecipherOnly KeyUsageType = "decipherOnly" + // DigitalSignature ... + DigitalSignature KeyUsageType = "digitalSignature" + // EncipherOnly ... + EncipherOnly KeyUsageType = "encipherOnly" + // KeyAgreement ... + KeyAgreement KeyUsageType = "keyAgreement" + // KeyCertSign ... + KeyCertSign KeyUsageType = "keyCertSign" + // KeyEncipherment ... + KeyEncipherment KeyUsageType = "keyEncipherment" + // NonRepudiation ... + NonRepudiation KeyUsageType = "nonRepudiation" +) + +// PossibleKeyUsageTypeValues returns an array of possible values for the KeyUsageType const type. +func PossibleKeyUsageTypeValues() []KeyUsageType { + return []KeyUsageType{CRLSign, DataEncipherment, DecipherOnly, DigitalSignature, EncipherOnly, KeyAgreement, KeyCertSign, KeyEncipherment, NonRepudiation} +} + +// SasTokenType enumerates the values for sas token type. +type SasTokenType string + +const ( + // Account ... + Account SasTokenType = "account" + // Service ... + Service SasTokenType = "service" +) + +// PossibleSasTokenTypeValues returns an array of possible values for the SasTokenType const type. +func PossibleSasTokenTypeValues() []SasTokenType { + return []SasTokenType{Account, Service} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/models.go new file mode 100644 index 00000000000..8ebcb0f24cf --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/models.go @@ -0,0 +1,3611 @@ +package keyvault + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "encoding/json" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// The package's fully qualified name. +const fqdn = "github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault" + +// Action the action that will be executed. +type Action struct { + // ActionType - The type of the action. 
Possible values include: 'EmailContacts', 'AutoRenew' + ActionType ActionType `json:"action_type,omitempty"` +} + +// AdministratorDetails details of the organization administrator of the certificate issuer. +type AdministratorDetails struct { + // FirstName - First name. + FirstName *string `json:"first_name,omitempty"` + // LastName - Last name. + LastName *string `json:"last_name,omitempty"` + // EmailAddress - Email address. + EmailAddress *string `json:"email,omitempty"` + // Phone - Phone number. + Phone *string `json:"phone,omitempty"` +} + +// Attributes the object attributes managed by the KeyVault service. +type Attributes struct { + // Enabled - Determines whether the object is enabled. + Enabled *bool `json:"enabled,omitempty"` + // NotBefore - Not before date in UTC. + NotBefore *date.UnixTime `json:"nbf,omitempty"` + // Expires - Expiry date in UTC. + Expires *date.UnixTime `json:"exp,omitempty"` + // Created - READ-ONLY; Creation time in UTC. + Created *date.UnixTime `json:"created,omitempty"` + // Updated - READ-ONLY; Last updated time in UTC. + Updated *date.UnixTime `json:"updated,omitempty"` +} + +// MarshalJSON is the custom marshaler for Attributes. +func (a Attributes) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if a.Enabled != nil { + objectMap["enabled"] = a.Enabled + } + if a.NotBefore != nil { + objectMap["nbf"] = a.NotBefore + } + if a.Expires != nil { + objectMap["exp"] = a.Expires + } + return json.Marshal(objectMap) +} + +// BackupCertificateResult the backup certificate result, containing the backup blob. +type BackupCertificateResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; The backup blob containing the backed up certificate. (a URL-encoded base64 string) + Value *string `json:"value,omitempty"` +} + +// MarshalJSON is the custom marshaler for BackupCertificateResult. +func (bcr BackupCertificateResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// BackupKeyResult the backup key result, containing the backup blob. +type BackupKeyResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; The backup blob containing the backed up key. (a URL-encoded base64 string) + Value *string `json:"value,omitempty"` +} + +// MarshalJSON is the custom marshaler for BackupKeyResult. +func (bkr BackupKeyResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// BackupSecretResult the backup secret result, containing the backup blob. +type BackupSecretResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; The backup blob containing the backed up secret. (a URL-encoded base64 string) + Value *string `json:"value,omitempty"` +} + +// MarshalJSON is the custom marshaler for BackupSecretResult. +func (bsr BackupSecretResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// BackupStorageResult the backup storage result, containing the backup blob. +type BackupStorageResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; The backup blob containing the backed up storage account. (a URL-encoded base64 string) + Value *string `json:"value,omitempty"` +} + +// MarshalJSON is the custom marshaler for BackupStorageResult. 
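+//
+// As with the other backup result types above, the custom marshaler exists so
+// that READ-ONLY fields are never echoed back to the service: marshaling
+// always yields an empty JSON object, even when Value is set. For example
+// (illustrative):
+//
+//	v := "backup-blob"
+//	b, _ := json.Marshal(BackupStorageResult{Value: &v})
+//	fmt.Println(string(b)) // prints {}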
+func (bsr BackupStorageResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// CertificateAttributes the certificate management attributes. +type CertificateAttributes struct { + // RecoverableDays - READ-ONLY; softDelete data retention days. Value should be >=7 and <=90 when softDelete enabled, otherwise 0. + RecoverableDays *int32 `json:"recoverableDays,omitempty"` + // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for certificates in the current vault. If it contains 'Purgeable', the certificate can be permanently deleted by a privileged user; otherwise, only the system can purge the certificate, at the end of the retention interval. Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription', 'CustomizedRecoverablePurgeable', 'CustomizedRecoverable', 'CustomizedRecoverableProtectedSubscription' + RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"` + // Enabled - Determines whether the object is enabled. + Enabled *bool `json:"enabled,omitempty"` + // NotBefore - Not before date in UTC. + NotBefore *date.UnixTime `json:"nbf,omitempty"` + // Expires - Expiry date in UTC. + Expires *date.UnixTime `json:"exp,omitempty"` + // Created - READ-ONLY; Creation time in UTC. + Created *date.UnixTime `json:"created,omitempty"` + // Updated - READ-ONLY; Last updated time in UTC. + Updated *date.UnixTime `json:"updated,omitempty"` +} + +// MarshalJSON is the custom marshaler for CertificateAttributes. +func (ca CertificateAttributes) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ca.Enabled != nil { + objectMap["enabled"] = ca.Enabled + } + if ca.NotBefore != nil { + objectMap["nbf"] = ca.NotBefore + } + if ca.Expires != nil { + objectMap["exp"] = ca.Expires + } + return json.Marshal(objectMap) +} + +// CertificateBundle a certificate bundle consists of a certificate (X509) plus its attributes. +type CertificateBundle struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; The certificate id. + ID *string `json:"id,omitempty"` + // Kid - READ-ONLY; The key id. + Kid *string `json:"kid,omitempty"` + // Sid - READ-ONLY; The secret id. + Sid *string `json:"sid,omitempty"` + // X509Thumbprint - READ-ONLY; Thumbprint of the certificate. (a URL-encoded base64 string) + X509Thumbprint *string `json:"x5t,omitempty"` + // Policy - READ-ONLY; The management policy. + Policy *CertificatePolicy `json:"policy,omitempty"` + // Cer - CER contents of x509 certificate. + Cer *[]byte `json:"cer,omitempty"` + // ContentType - The content type of the secret. + ContentType *string `json:"contentType,omitempty"` + // Attributes - The certificate attributes. + Attributes *CertificateAttributes `json:"attributes,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for CertificateBundle. +func (cb CertificateBundle) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if cb.Cer != nil { + objectMap["cer"] = cb.Cer + } + if cb.ContentType != nil { + objectMap["contentType"] = cb.ContentType + } + if cb.Attributes != nil { + objectMap["attributes"] = cb.Attributes + } + if cb.Tags != nil { + objectMap["tags"] = cb.Tags + } + return json.Marshal(objectMap) +} + +// CertificateCreateParameters the certificate create parameters. 
+type CertificateCreateParameters struct { + // CertificatePolicy - The management policy for the certificate. + CertificatePolicy *CertificatePolicy `json:"policy,omitempty"` + // CertificateAttributes - The attributes of the certificate (optional). + CertificateAttributes *CertificateAttributes `json:"attributes,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for CertificateCreateParameters. +func (ccp CertificateCreateParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ccp.CertificatePolicy != nil { + objectMap["policy"] = ccp.CertificatePolicy + } + if ccp.CertificateAttributes != nil { + objectMap["attributes"] = ccp.CertificateAttributes + } + if ccp.Tags != nil { + objectMap["tags"] = ccp.Tags + } + return json.Marshal(objectMap) +} + +// CertificateImportParameters the certificate import parameters. +type CertificateImportParameters struct { + // Base64EncodedCertificate - A PEM file or a base64-encoded PFX file. PEM files need to contain the private key. + Base64EncodedCertificate *string `json:"value,omitempty"` + // Password - If the private key in base64EncodedCertificate is encrypted, the password used for encryption. + Password *string `json:"pwd,omitempty"` + // CertificatePolicy - The management policy for the certificate. + CertificatePolicy *CertificatePolicy `json:"policy,omitempty"` + // CertificateAttributes - The attributes of the certificate (optional). + CertificateAttributes *CertificateAttributes `json:"attributes,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for CertificateImportParameters. +func (cip CertificateImportParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if cip.Base64EncodedCertificate != nil { + objectMap["value"] = cip.Base64EncodedCertificate + } + if cip.Password != nil { + objectMap["pwd"] = cip.Password + } + if cip.CertificatePolicy != nil { + objectMap["policy"] = cip.CertificatePolicy + } + if cip.CertificateAttributes != nil { + objectMap["attributes"] = cip.CertificateAttributes + } + if cip.Tags != nil { + objectMap["tags"] = cip.Tags + } + return json.Marshal(objectMap) +} + +// CertificateIssuerItem the certificate issuer item containing certificate issuer metadata. +type CertificateIssuerItem struct { + // ID - Certificate Identifier. + ID *string `json:"id,omitempty"` + // Provider - The issuer provider. + Provider *string `json:"provider,omitempty"` +} + +// CertificateIssuerListResult the certificate issuer list result. +type CertificateIssuerListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; A response message containing a list of certificate issuers in the key vault along with a link to the next page of certificate issuers. + Value *[]CertificateIssuerItem `json:"value,omitempty"` + // NextLink - READ-ONLY; The URL to get the next set of certificate issuers. + NextLink *string `json:"nextLink,omitempty"` +} + +// MarshalJSON is the custom marshaler for CertificateIssuerListResult. +func (cilr CertificateIssuerListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// CertificateIssuerListResultIterator provides access to a complete listing of CertificateIssuerItem +// values. 
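+//
+// A sketch of walking every issuer through this iterator (assuming the
+// generated GetCertificateIssuersComplete convenience method on BaseClient,
+// which returns it):
+//
+//	iter, err := client.GetCertificateIssuersComplete(ctx, vaultURL, nil)
+//	for err == nil && iter.NotDone() {
+//		if id := iter.Value().ID; id != nil {
+//			fmt.Println(*id)
+//		}
+//		err = iter.NextWithContext(ctx)
+//	}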
+type CertificateIssuerListResultIterator struct { + i int + page CertificateIssuerListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *CertificateIssuerListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CertificateIssuerListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *CertificateIssuerListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter CertificateIssuerListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter CertificateIssuerListResultIterator) Response() CertificateIssuerListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter CertificateIssuerListResultIterator) Value() CertificateIssuerItem { + if !iter.page.NotDone() { + return CertificateIssuerItem{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the CertificateIssuerListResultIterator type. +func NewCertificateIssuerListResultIterator(page CertificateIssuerListResultPage) CertificateIssuerListResultIterator { + return CertificateIssuerListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (cilr CertificateIssuerListResult) IsEmpty() bool { + return cilr.Value == nil || len(*cilr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (cilr CertificateIssuerListResult) hasNextLink() bool { + return cilr.NextLink != nil && len(*cilr.NextLink) != 0 +} + +// certificateIssuerListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (cilr CertificateIssuerListResult) certificateIssuerListResultPreparer(ctx context.Context) (*http.Request, error) { + if !cilr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(cilr.NextLink))) +} + +// CertificateIssuerListResultPage contains a page of CertificateIssuerItem values. +type CertificateIssuerListResultPage struct { + fn func(context.Context, CertificateIssuerListResult) (CertificateIssuerListResult, error) + cilr CertificateIssuerListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
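+//
+// Pages can also be consumed directly instead of through the iterator; a
+// sketch (again assuming the generated GetCertificateIssuers method, which
+// returns the first page):
+//
+//	page, err := client.GetCertificateIssuers(ctx, vaultURL, nil)
+//	for err == nil && page.NotDone() {
+//		for _, item := range page.Values() {
+//			// inspect item ...
+//		}
+//		err = page.NextWithContext(ctx)
+//	}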
+func (page *CertificateIssuerListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CertificateIssuerListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.cilr) + if err != nil { + return err + } + page.cilr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *CertificateIssuerListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page CertificateIssuerListResultPage) NotDone() bool { + return !page.cilr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page CertificateIssuerListResultPage) Response() CertificateIssuerListResult { + return page.cilr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page CertificateIssuerListResultPage) Values() []CertificateIssuerItem { + if page.cilr.IsEmpty() { + return nil + } + return *page.cilr.Value +} + +// Creates a new instance of the CertificateIssuerListResultPage type. +func NewCertificateIssuerListResultPage(cur CertificateIssuerListResult, getNextPage func(context.Context, CertificateIssuerListResult) (CertificateIssuerListResult, error)) CertificateIssuerListResultPage { + return CertificateIssuerListResultPage{ + fn: getNextPage, + cilr: cur, + } +} + +// CertificateIssuerSetParameters the certificate issuer set parameters. +type CertificateIssuerSetParameters struct { + // Provider - The issuer provider. + Provider *string `json:"provider,omitempty"` + // Credentials - The credentials to be used for the issuer. + Credentials *IssuerCredentials `json:"credentials,omitempty"` + // OrganizationDetails - Details of the organization as provided to the issuer. + OrganizationDetails *OrganizationDetails `json:"org_details,omitempty"` + // Attributes - Attributes of the issuer object. + Attributes *IssuerAttributes `json:"attributes,omitempty"` +} + +// CertificateIssuerUpdateParameters the certificate issuer update parameters. +type CertificateIssuerUpdateParameters struct { + // Provider - The issuer provider. + Provider *string `json:"provider,omitempty"` + // Credentials - The credentials to be used for the issuer. + Credentials *IssuerCredentials `json:"credentials,omitempty"` + // OrganizationDetails - Details of the organization as provided to the issuer. + OrganizationDetails *OrganizationDetails `json:"org_details,omitempty"` + // Attributes - Attributes of the issuer object. + Attributes *IssuerAttributes `json:"attributes,omitempty"` +} + +// CertificateItem the certificate item containing certificate metadata. +type CertificateItem struct { + // ID - Certificate identifier. + ID *string `json:"id,omitempty"` + // Attributes - The certificate management attributes. + Attributes *CertificateAttributes `json:"attributes,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` + // X509Thumbprint - Thumbprint of the certificate. 
(a URL-encoded base64 string) + X509Thumbprint *string `json:"x5t,omitempty"` +} + +// MarshalJSON is the custom marshaler for CertificateItem. +func (ci CertificateItem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ci.ID != nil { + objectMap["id"] = ci.ID + } + if ci.Attributes != nil { + objectMap["attributes"] = ci.Attributes + } + if ci.Tags != nil { + objectMap["tags"] = ci.Tags + } + if ci.X509Thumbprint != nil { + objectMap["x5t"] = ci.X509Thumbprint + } + return json.Marshal(objectMap) +} + +// CertificateListResult the certificate list result. +type CertificateListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; A response message containing a list of certificates in the key vault along with a link to the next page of certificates. + Value *[]CertificateItem `json:"value,omitempty"` + // NextLink - READ-ONLY; The URL to get the next set of certificates. + NextLink *string `json:"nextLink,omitempty"` +} + +// MarshalJSON is the custom marshaler for CertificateListResult. +func (clr CertificateListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// CertificateListResultIterator provides access to a complete listing of CertificateItem values. +type CertificateListResultIterator struct { + i int + page CertificateListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *CertificateListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CertificateListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *CertificateListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter CertificateListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter CertificateListResultIterator) Response() CertificateListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter CertificateListResultIterator) Value() CertificateItem { + if !iter.page.NotDone() { + return CertificateItem{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the CertificateListResultIterator type. +func NewCertificateListResultIterator(page CertificateListResultPage) CertificateListResultIterator { + return CertificateListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. 
+func (clr CertificateListResult) IsEmpty() bool { + return clr.Value == nil || len(*clr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (clr CertificateListResult) hasNextLink() bool { + return clr.NextLink != nil && len(*clr.NextLink) != 0 +} + +// certificateListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (clr CertificateListResult) certificateListResultPreparer(ctx context.Context) (*http.Request, error) { + if !clr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(clr.NextLink))) +} + +// CertificateListResultPage contains a page of CertificateItem values. +type CertificateListResultPage struct { + fn func(context.Context, CertificateListResult) (CertificateListResult, error) + clr CertificateListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *CertificateListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CertificateListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.clr) + if err != nil { + return err + } + page.clr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *CertificateListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page CertificateListResultPage) NotDone() bool { + return !page.clr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page CertificateListResultPage) Response() CertificateListResult { + return page.clr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page CertificateListResultPage) Values() []CertificateItem { + if page.clr.IsEmpty() { + return nil + } + return *page.clr.Value +} + +// Creates a new instance of the CertificateListResultPage type. +func NewCertificateListResultPage(cur CertificateListResult, getNextPage func(context.Context, CertificateListResult) (CertificateListResult, error)) CertificateListResultPage { + return CertificateListResultPage{ + fn: getNextPage, + clr: cur, + } +} + +// CertificateMergeParameters the certificate merge parameters +type CertificateMergeParameters struct { + // X509Certificates - The certificate or the certificate chain to merge. + X509Certificates *[][]byte `json:"x5c,omitempty"` + // CertificateAttributes - The attributes of the certificate (optional). + CertificateAttributes *CertificateAttributes `json:"attributes,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for CertificateMergeParameters. 
+func (cmp CertificateMergeParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if cmp.X509Certificates != nil { + objectMap["x5c"] = cmp.X509Certificates + } + if cmp.CertificateAttributes != nil { + objectMap["attributes"] = cmp.CertificateAttributes + } + if cmp.Tags != nil { + objectMap["tags"] = cmp.Tags + } + return json.Marshal(objectMap) +} + +// CertificateOperation a certificate operation is returned in case of asynchronous requests. +type CertificateOperation struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; The certificate id. + ID *string `json:"id,omitempty"` + // IssuerParameters - Parameters for the issuer of the X509 component of a certificate. + IssuerParameters *IssuerParameters `json:"issuer,omitempty"` + // Csr - The certificate signing request (CSR) that is being used in the certificate operation. + Csr *[]byte `json:"csr,omitempty"` + // CancellationRequested - Indicates if cancellation was requested on the certificate operation. + CancellationRequested *bool `json:"cancellation_requested,omitempty"` + // Status - Status of the certificate operation. + Status *string `json:"status,omitempty"` + // StatusDetails - The status details of the certificate operation. + StatusDetails *string `json:"status_details,omitempty"` + // Error - Error encountered, if any, during the certificate operation. + Error *Error `json:"error,omitempty"` + // Target - Location which contains the result of the certificate operation. + Target *string `json:"target,omitempty"` + // RequestID - Identifier for the certificate operation. + RequestID *string `json:"request_id,omitempty"` +} + +// MarshalJSON is the custom marshaler for CertificateOperation. +func (co CertificateOperation) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if co.IssuerParameters != nil { + objectMap["issuer"] = co.IssuerParameters + } + if co.Csr != nil { + objectMap["csr"] = co.Csr + } + if co.CancellationRequested != nil { + objectMap["cancellation_requested"] = co.CancellationRequested + } + if co.Status != nil { + objectMap["status"] = co.Status + } + if co.StatusDetails != nil { + objectMap["status_details"] = co.StatusDetails + } + if co.Error != nil { + objectMap["error"] = co.Error + } + if co.Target != nil { + objectMap["target"] = co.Target + } + if co.RequestID != nil { + objectMap["request_id"] = co.RequestID + } + return json.Marshal(objectMap) +} + +// CertificateOperationUpdateParameter the certificate operation update parameters. +type CertificateOperationUpdateParameter struct { + // CancellationRequested - Indicates if cancellation was requested on the certificate operation. + CancellationRequested *bool `json:"cancellation_requested,omitempty"` +} + +// CertificatePolicy management policy for a certificate. +type CertificatePolicy struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; The certificate id. + ID *string `json:"id,omitempty"` + // KeyProperties - Properties of the key backing a certificate. + KeyProperties *KeyProperties `json:"key_props,omitempty"` + // SecretProperties - Properties of the secret backing a certificate. + SecretProperties *SecretProperties `json:"secret_props,omitempty"` + // X509CertificateProperties - Properties of the X509 component of a certificate. + X509CertificateProperties *X509CertificateProperties `json:"x509_props,omitempty"` + // LifetimeActions - Actions that will be performed by Key Vault over the lifetime of a certificate. 
+ LifetimeActions *[]LifetimeAction `json:"lifetime_actions,omitempty"` + // IssuerParameters - Parameters for the issuer of the X509 component of a certificate. + IssuerParameters *IssuerParameters `json:"issuer,omitempty"` + // Attributes - The certificate attributes. + Attributes *CertificateAttributes `json:"attributes,omitempty"` +} + +// MarshalJSON is the custom marshaler for CertificatePolicy. +func (cp CertificatePolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if cp.KeyProperties != nil { + objectMap["key_props"] = cp.KeyProperties + } + if cp.SecretProperties != nil { + objectMap["secret_props"] = cp.SecretProperties + } + if cp.X509CertificateProperties != nil { + objectMap["x509_props"] = cp.X509CertificateProperties + } + if cp.LifetimeActions != nil { + objectMap["lifetime_actions"] = cp.LifetimeActions + } + if cp.IssuerParameters != nil { + objectMap["issuer"] = cp.IssuerParameters + } + if cp.Attributes != nil { + objectMap["attributes"] = cp.Attributes + } + return json.Marshal(objectMap) +} + +// CertificateRestoreParameters the certificate restore parameters. +type CertificateRestoreParameters struct { + // CertificateBundleBackup - The backup blob associated with a certificate bundle. (a URL-encoded base64 string) + CertificateBundleBackup *string `json:"value,omitempty"` +} + +// CertificateUpdateParameters the certificate update parameters. +type CertificateUpdateParameters struct { + // CertificatePolicy - The management policy for the certificate. + CertificatePolicy *CertificatePolicy `json:"policy,omitempty"` + // CertificateAttributes - The attributes of the certificate (optional). + CertificateAttributes *CertificateAttributes `json:"attributes,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for CertificateUpdateParameters. +func (cup CertificateUpdateParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if cup.CertificatePolicy != nil { + objectMap["policy"] = cup.CertificatePolicy + } + if cup.CertificateAttributes != nil { + objectMap["attributes"] = cup.CertificateAttributes + } + if cup.Tags != nil { + objectMap["tags"] = cup.Tags + } + return json.Marshal(objectMap) +} + +// Contact the contact information for the vault certificates. +type Contact struct { + // EmailAddress - Email address. + EmailAddress *string `json:"email,omitempty"` + // Name - Name. + Name *string `json:"name,omitempty"` + // Phone - Phone number. + Phone *string `json:"phone,omitempty"` +} + +// Contacts the contacts for the vault certificates. +type Contacts struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; Identifier for the contacts collection. + ID *string `json:"id,omitempty"` + // ContactList - The contact list for the vault certificates. + ContactList *[]Contact `json:"contacts,omitempty"` +} + +// MarshalJSON is the custom marshaler for Contacts. +func (c Contacts) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if c.ContactList != nil { + objectMap["contacts"] = c.ContactList + } + return json.Marshal(objectMap) +} + +// DeletedCertificateBundle a Deleted Certificate consisting of its previous id, attributes and its tags, +// as well as information on when it will be purged. 
+type DeletedCertificateBundle struct { + autorest.Response `json:"-"` + // RecoveryID - The url of the recovery object, used to identify and recover the deleted certificate. + RecoveryID *string `json:"recoveryId,omitempty"` + // ScheduledPurgeDate - READ-ONLY; The time when the certificate is scheduled to be purged, in UTC + ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` + // DeletedDate - READ-ONLY; The time when the certificate was deleted, in UTC + DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` + // ID - READ-ONLY; The certificate id. + ID *string `json:"id,omitempty"` + // Kid - READ-ONLY; The key id. + Kid *string `json:"kid,omitempty"` + // Sid - READ-ONLY; The secret id. + Sid *string `json:"sid,omitempty"` + // X509Thumbprint - READ-ONLY; Thumbprint of the certificate. (a URL-encoded base64 string) + X509Thumbprint *string `json:"x5t,omitempty"` + // Policy - READ-ONLY; The management policy. + Policy *CertificatePolicy `json:"policy,omitempty"` + // Cer - CER contents of x509 certificate. + Cer *[]byte `json:"cer,omitempty"` + // ContentType - The content type of the secret. + ContentType *string `json:"contentType,omitempty"` + // Attributes - The certificate attributes. + Attributes *CertificateAttributes `json:"attributes,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for DeletedCertificateBundle. +func (dcb DeletedCertificateBundle) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dcb.RecoveryID != nil { + objectMap["recoveryId"] = dcb.RecoveryID + } + if dcb.Cer != nil { + objectMap["cer"] = dcb.Cer + } + if dcb.ContentType != nil { + objectMap["contentType"] = dcb.ContentType + } + if dcb.Attributes != nil { + objectMap["attributes"] = dcb.Attributes + } + if dcb.Tags != nil { + objectMap["tags"] = dcb.Tags + } + return json.Marshal(objectMap) +} + +// DeletedCertificateItem the deleted certificate item containing metadata about the deleted certificate. +type DeletedCertificateItem struct { + // RecoveryID - The url of the recovery object, used to identify and recover the deleted certificate. + RecoveryID *string `json:"recoveryId,omitempty"` + // ScheduledPurgeDate - READ-ONLY; The time when the certificate is scheduled to be purged, in UTC + ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` + // DeletedDate - READ-ONLY; The time when the certificate was deleted, in UTC + DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` + // ID - Certificate identifier. + ID *string `json:"id,omitempty"` + // Attributes - The certificate management attributes. + Attributes *CertificateAttributes `json:"attributes,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` + // X509Thumbprint - Thumbprint of the certificate. (a URL-encoded base64 string) + X509Thumbprint *string `json:"x5t,omitempty"` +} + +// MarshalJSON is the custom marshaler for DeletedCertificateItem. 
+func (dci DeletedCertificateItem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dci.RecoveryID != nil { + objectMap["recoveryId"] = dci.RecoveryID + } + if dci.ID != nil { + objectMap["id"] = dci.ID + } + if dci.Attributes != nil { + objectMap["attributes"] = dci.Attributes + } + if dci.Tags != nil { + objectMap["tags"] = dci.Tags + } + if dci.X509Thumbprint != nil { + objectMap["x5t"] = dci.X509Thumbprint + } + return json.Marshal(objectMap) +} + +// DeletedCertificateListResult a list of certificates that have been deleted in this vault. +type DeletedCertificateListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; A response message containing a list of deleted certificates in the vault along with a link to the next page of deleted certificates + Value *[]DeletedCertificateItem `json:"value,omitempty"` + // NextLink - READ-ONLY; The URL to get the next set of deleted certificates. + NextLink *string `json:"nextLink,omitempty"` +} + +// MarshalJSON is the custom marshaler for DeletedCertificateListResult. +func (dclr DeletedCertificateListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// DeletedCertificateListResultIterator provides access to a complete listing of DeletedCertificateItem +// values. +type DeletedCertificateListResultIterator struct { + i int + page DeletedCertificateListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *DeletedCertificateListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeletedCertificateListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *DeletedCertificateListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter DeletedCertificateListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter DeletedCertificateListResultIterator) Response() DeletedCertificateListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter DeletedCertificateListResultIterator) Value() DeletedCertificateItem { + if !iter.page.NotDone() { + return DeletedCertificateItem{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the DeletedCertificateListResultIterator type. 
+func NewDeletedCertificateListResultIterator(page DeletedCertificateListResultPage) DeletedCertificateListResultIterator { + return DeletedCertificateListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (dclr DeletedCertificateListResult) IsEmpty() bool { + return dclr.Value == nil || len(*dclr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (dclr DeletedCertificateListResult) hasNextLink() bool { + return dclr.NextLink != nil && len(*dclr.NextLink) != 0 +} + +// deletedCertificateListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (dclr DeletedCertificateListResult) deletedCertificateListResultPreparer(ctx context.Context) (*http.Request, error) { + if !dclr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(dclr.NextLink))) +} + +// DeletedCertificateListResultPage contains a page of DeletedCertificateItem values. +type DeletedCertificateListResultPage struct { + fn func(context.Context, DeletedCertificateListResult) (DeletedCertificateListResult, error) + dclr DeletedCertificateListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *DeletedCertificateListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeletedCertificateListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.dclr) + if err != nil { + return err + } + page.dclr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *DeletedCertificateListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DeletedCertificateListResultPage) NotDone() bool { + return !page.dclr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DeletedCertificateListResultPage) Response() DeletedCertificateListResult { + return page.dclr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DeletedCertificateListResultPage) Values() []DeletedCertificateItem { + if page.dclr.IsEmpty() { + return nil + } + return *page.dclr.Value +} + +// Creates a new instance of the DeletedCertificateListResultPage type. 
+func NewDeletedCertificateListResultPage(cur DeletedCertificateListResult, getNextPage func(context.Context, DeletedCertificateListResult) (DeletedCertificateListResult, error)) DeletedCertificateListResultPage { + return DeletedCertificateListResultPage{ + fn: getNextPage, + dclr: cur, + } +} + +// DeletedKeyBundle a DeletedKeyBundle consisting of a WebKey plus its Attributes and deletion info +type DeletedKeyBundle struct { + autorest.Response `json:"-"` + // RecoveryID - The url of the recovery object, used to identify and recover the deleted key. + RecoveryID *string `json:"recoveryId,omitempty"` + // ScheduledPurgeDate - READ-ONLY; The time when the key is scheduled to be purged, in UTC + ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` + // DeletedDate - READ-ONLY; The time when the key was deleted, in UTC + DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` + // Key - The Json web key. + Key *JSONWebKey `json:"key,omitempty"` + // Attributes - The key management attributes. + Attributes *KeyAttributes `json:"attributes,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` + // Managed - READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. + Managed *bool `json:"managed,omitempty"` +} + +// MarshalJSON is the custom marshaler for DeletedKeyBundle. +func (dkb DeletedKeyBundle) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dkb.RecoveryID != nil { + objectMap["recoveryId"] = dkb.RecoveryID + } + if dkb.Key != nil { + objectMap["key"] = dkb.Key + } + if dkb.Attributes != nil { + objectMap["attributes"] = dkb.Attributes + } + if dkb.Tags != nil { + objectMap["tags"] = dkb.Tags + } + return json.Marshal(objectMap) +} + +// DeletedKeyItem the deleted key item containing the deleted key metadata and information about deletion. +type DeletedKeyItem struct { + // RecoveryID - The url of the recovery object, used to identify and recover the deleted key. + RecoveryID *string `json:"recoveryId,omitempty"` + // ScheduledPurgeDate - READ-ONLY; The time when the key is scheduled to be purged, in UTC + ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` + // DeletedDate - READ-ONLY; The time when the key was deleted, in UTC + DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` + // Kid - Key identifier. + Kid *string `json:"kid,omitempty"` + // Attributes - The key management attributes. + Attributes *KeyAttributes `json:"attributes,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` + // Managed - READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. + Managed *bool `json:"managed,omitempty"` +} + +// MarshalJSON is the custom marshaler for DeletedKeyItem. +func (dki DeletedKeyItem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dki.RecoveryID != nil { + objectMap["recoveryId"] = dki.RecoveryID + } + if dki.Kid != nil { + objectMap["kid"] = dki.Kid + } + if dki.Attributes != nil { + objectMap["attributes"] = dki.Attributes + } + if dki.Tags != nil { + objectMap["tags"] = dki.Tags + } + return json.Marshal(objectMap) +} + +// DeletedKeyListResult a list of keys that have been deleted in this vault. 
+type DeletedKeyListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; A response message containing a list of deleted keys in the vault along with a link to the next page of deleted keys + Value *[]DeletedKeyItem `json:"value,omitempty"` + // NextLink - READ-ONLY; The URL to get the next set of deleted keys. + NextLink *string `json:"nextLink,omitempty"` +} + +// MarshalJSON is the custom marshaler for DeletedKeyListResult. +func (dklr DeletedKeyListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// DeletedKeyListResultIterator provides access to a complete listing of DeletedKeyItem values. +type DeletedKeyListResultIterator struct { + i int + page DeletedKeyListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *DeletedKeyListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeletedKeyListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *DeletedKeyListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter DeletedKeyListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter DeletedKeyListResultIterator) Response() DeletedKeyListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter DeletedKeyListResultIterator) Value() DeletedKeyItem { + if !iter.page.NotDone() { + return DeletedKeyItem{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the DeletedKeyListResultIterator type. +func NewDeletedKeyListResultIterator(page DeletedKeyListResultPage) DeletedKeyListResultIterator { + return DeletedKeyListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (dklr DeletedKeyListResult) IsEmpty() bool { + return dklr.Value == nil || len(*dklr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (dklr DeletedKeyListResult) hasNextLink() bool { + return dklr.NextLink != nil && len(*dklr.NextLink) != 0 +} + +// deletedKeyListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. 
+func (dklr DeletedKeyListResult) deletedKeyListResultPreparer(ctx context.Context) (*http.Request, error) { + if !dklr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(dklr.NextLink))) +} + +// DeletedKeyListResultPage contains a page of DeletedKeyItem values. +type DeletedKeyListResultPage struct { + fn func(context.Context, DeletedKeyListResult) (DeletedKeyListResult, error) + dklr DeletedKeyListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *DeletedKeyListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeletedKeyListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.dklr) + if err != nil { + return err + } + page.dklr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *DeletedKeyListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DeletedKeyListResultPage) NotDone() bool { + return !page.dklr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DeletedKeyListResultPage) Response() DeletedKeyListResult { + return page.dklr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DeletedKeyListResultPage) Values() []DeletedKeyItem { + if page.dklr.IsEmpty() { + return nil + } + return *page.dklr.Value +} + +// Creates a new instance of the DeletedKeyListResultPage type. +func NewDeletedKeyListResultPage(cur DeletedKeyListResult, getNextPage func(context.Context, DeletedKeyListResult) (DeletedKeyListResult, error)) DeletedKeyListResultPage { + return DeletedKeyListResultPage{ + fn: getNextPage, + dklr: cur, + } +} + +// DeletedSasDefinitionBundle a deleted SAS definition bundle consisting of its previous id, attributes and +// its tags, as well as information on when it will be purged. +type DeletedSasDefinitionBundle struct { + autorest.Response `json:"-"` + // RecoveryID - The url of the recovery object, used to identify and recover the deleted SAS definition. + RecoveryID *string `json:"recoveryId,omitempty"` + // ScheduledPurgeDate - READ-ONLY; The time when the SAS definition is scheduled to be purged, in UTC + ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` + // DeletedDate - READ-ONLY; The time when the SAS definition was deleted, in UTC + DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` + // ID - READ-ONLY; The SAS definition id. + ID *string `json:"id,omitempty"` + // SecretID - READ-ONLY; Storage account SAS definition secret id. + SecretID *string `json:"sid,omitempty"` + // TemplateURI - READ-ONLY; The SAS definition token template signed with an arbitrary key. Tokens created according to the SAS definition will have the same properties as the template. 
+ TemplateURI *string `json:"templateUri,omitempty"` + // SasType - READ-ONLY; The type of SAS token the SAS definition will create. Possible values include: 'Account', 'Service' + SasType SasTokenType `json:"sasType,omitempty"` + // ValidityPeriod - READ-ONLY; The validity period of SAS tokens created according to the SAS definition. + ValidityPeriod *string `json:"validityPeriod,omitempty"` + // Attributes - READ-ONLY; The SAS definition attributes. + Attributes *SasDefinitionAttributes `json:"attributes,omitempty"` + // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for DeletedSasDefinitionBundle. +func (dsdb DeletedSasDefinitionBundle) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dsdb.RecoveryID != nil { + objectMap["recoveryId"] = dsdb.RecoveryID + } + return json.Marshal(objectMap) +} + +// DeletedSasDefinitionItem the deleted SAS definition item containing metadata about the deleted SAS +// definition. +type DeletedSasDefinitionItem struct { + // RecoveryID - The url of the recovery object, used to identify and recover the deleted SAS definition. + RecoveryID *string `json:"recoveryId,omitempty"` + // ScheduledPurgeDate - READ-ONLY; The time when the SAS definition is scheduled to be purged, in UTC + ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` + // DeletedDate - READ-ONLY; The time when the SAS definition was deleted, in UTC + DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` + // ID - READ-ONLY; The storage SAS identifier. + ID *string `json:"id,omitempty"` + // SecretID - READ-ONLY; The storage account SAS definition secret id. + SecretID *string `json:"sid,omitempty"` + // Attributes - READ-ONLY; The SAS definition management attributes. + Attributes *SasDefinitionAttributes `json:"attributes,omitempty"` + // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for DeletedSasDefinitionItem. +func (dsdi DeletedSasDefinitionItem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dsdi.RecoveryID != nil { + objectMap["recoveryId"] = dsdi.RecoveryID + } + return json.Marshal(objectMap) +} + +// DeletedSasDefinitionListResult the deleted SAS definition list result +type DeletedSasDefinitionListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; A response message containing a list of the deleted SAS definitions in the vault along with a link to the next page of deleted sas definitions + Value *[]DeletedSasDefinitionItem `json:"value,omitempty"` + // NextLink - READ-ONLY; The URL to get the next set of deleted SAS definitions. + NextLink *string `json:"nextLink,omitempty"` +} + +// MarshalJSON is the custom marshaler for DeletedSasDefinitionListResult. +func (dsdlr DeletedSasDefinitionListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// DeletedSasDefinitionListResultIterator provides access to a complete listing of DeletedSasDefinitionItem +// values. +type DeletedSasDefinitionListResultIterator struct { + i int + page DeletedSasDefinitionListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
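+//
+// The advance algorithm is shared by every generated iterator: the index is
+// incremented optimistically; if it still falls inside the current page the
+// call returns immediately, otherwise the underlying page is advanced and
+// the index reset to 0. On a page-fetch error the increment is rolled back
+// so the iterator stays on the last good value.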
+func (iter *DeletedSasDefinitionListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeletedSasDefinitionListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *DeletedSasDefinitionListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter DeletedSasDefinitionListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter DeletedSasDefinitionListResultIterator) Response() DeletedSasDefinitionListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter DeletedSasDefinitionListResultIterator) Value() DeletedSasDefinitionItem { + if !iter.page.NotDone() { + return DeletedSasDefinitionItem{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the DeletedSasDefinitionListResultIterator type. +func NewDeletedSasDefinitionListResultIterator(page DeletedSasDefinitionListResultPage) DeletedSasDefinitionListResultIterator { + return DeletedSasDefinitionListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (dsdlr DeletedSasDefinitionListResult) IsEmpty() bool { + return dsdlr.Value == nil || len(*dsdlr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (dsdlr DeletedSasDefinitionListResult) hasNextLink() bool { + return dsdlr.NextLink != nil && len(*dsdlr.NextLink) != 0 +} + +// deletedSasDefinitionListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (dsdlr DeletedSasDefinitionListResult) deletedSasDefinitionListResultPreparer(ctx context.Context) (*http.Request, error) { + if !dsdlr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(dsdlr.NextLink))) +} + +// DeletedSasDefinitionListResultPage contains a page of DeletedSasDefinitionItem values. +type DeletedSasDefinitionListResultPage struct { + fn func(context.Context, DeletedSasDefinitionListResult) (DeletedSasDefinitionListResult, error) + dsdlr DeletedSasDefinitionListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
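+//
+// Note the fetch loop below keeps requesting while the service returns an
+// empty page that still carries a nextLink; it stops as soon as a page has
+// values or the nextLink is exhausted, so callers never observe an empty
+// intermediate page.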
+func (page *DeletedSasDefinitionListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeletedSasDefinitionListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.dsdlr) + if err != nil { + return err + } + page.dsdlr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *DeletedSasDefinitionListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DeletedSasDefinitionListResultPage) NotDone() bool { + return !page.dsdlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DeletedSasDefinitionListResultPage) Response() DeletedSasDefinitionListResult { + return page.dsdlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DeletedSasDefinitionListResultPage) Values() []DeletedSasDefinitionItem { + if page.dsdlr.IsEmpty() { + return nil + } + return *page.dsdlr.Value +} + +// Creates a new instance of the DeletedSasDefinitionListResultPage type. +func NewDeletedSasDefinitionListResultPage(cur DeletedSasDefinitionListResult, getNextPage func(context.Context, DeletedSasDefinitionListResult) (DeletedSasDefinitionListResult, error)) DeletedSasDefinitionListResultPage { + return DeletedSasDefinitionListResultPage{ + fn: getNextPage, + dsdlr: cur, + } +} + +// DeletedSecretBundle a Deleted Secret consisting of its previous id, attributes and its tags, as well as +// information on when it will be purged. +type DeletedSecretBundle struct { + autorest.Response `json:"-"` + // RecoveryID - The url of the recovery object, used to identify and recover the deleted secret. + RecoveryID *string `json:"recoveryId,omitempty"` + // ScheduledPurgeDate - READ-ONLY; The time when the secret is scheduled to be purged, in UTC + ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` + // DeletedDate - READ-ONLY; The time when the secret was deleted, in UTC + DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` + // Value - The secret value. + Value *string `json:"value,omitempty"` + // ID - The secret id. + ID *string `json:"id,omitempty"` + // ContentType - The content type of the secret. + ContentType *string `json:"contentType,omitempty"` + // Attributes - The secret management attributes. + Attributes *SecretAttributes `json:"attributes,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` + // Kid - READ-ONLY; If this is a secret backing a KV certificate, then this field specifies the corresponding key backing the KV certificate. + Kid *string `json:"kid,omitempty"` + // Managed - READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a secret backing a certificate, then managed will be true. + Managed *bool `json:"managed,omitempty"` +} + +// MarshalJSON is the custom marshaler for DeletedSecretBundle. 
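+//
+// Only writable fields are emitted; the READ-ONLY fields (scheduledPurgeDate,
+// deletedDate, kid, managed) are omitted so that a round-tripped bundle never
+// sends server-managed state back to the service. For example, a bundle with
+// only Value and ID set marshals to (illustrative output, keys sorted by
+// encoding/json):
+//
+//	{"id":"https://myvault.vault.azure.net/secrets/mysecret","value":"s3cr3t"}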
+func (dsb DeletedSecretBundle) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if dsb.RecoveryID != nil {
+ objectMap["recoveryId"] = dsb.RecoveryID
+ }
+ if dsb.Value != nil {
+ objectMap["value"] = dsb.Value
+ }
+ if dsb.ID != nil {
+ objectMap["id"] = dsb.ID
+ }
+ if dsb.ContentType != nil {
+ objectMap["contentType"] = dsb.ContentType
+ }
+ if dsb.Attributes != nil {
+ objectMap["attributes"] = dsb.Attributes
+ }
+ if dsb.Tags != nil {
+ objectMap["tags"] = dsb.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// DeletedSecretItem the deleted secret item containing metadata about the deleted secret.
+type DeletedSecretItem struct {
+ // RecoveryID - The url of the recovery object, used to identify and recover the deleted secret.
+ RecoveryID *string `json:"recoveryId,omitempty"`
+ // ScheduledPurgeDate - READ-ONLY; The time when the secret is scheduled to be purged, in UTC
+ ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"`
+ // DeletedDate - READ-ONLY; The time when the secret was deleted, in UTC
+ DeletedDate *date.UnixTime `json:"deletedDate,omitempty"`
+ // ID - Secret identifier.
+ ID *string `json:"id,omitempty"`
+ // Attributes - The secret management attributes.
+ Attributes *SecretAttributes `json:"attributes,omitempty"`
+ // Tags - Application specific metadata in the form of key-value pairs.
+ Tags map[string]*string `json:"tags"`
+ // ContentType - Type of the secret value such as a password.
+ ContentType *string `json:"contentType,omitempty"`
+ // Managed - READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a secret backing a certificate, then managed will be true.
+ Managed *bool `json:"managed,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for DeletedSecretItem.
+func (dsi DeletedSecretItem) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if dsi.RecoveryID != nil {
+ objectMap["recoveryId"] = dsi.RecoveryID
+ }
+ if dsi.ID != nil {
+ objectMap["id"] = dsi.ID
+ }
+ if dsi.Attributes != nil {
+ objectMap["attributes"] = dsi.Attributes
+ }
+ if dsi.Tags != nil {
+ objectMap["tags"] = dsi.Tags
+ }
+ if dsi.ContentType != nil {
+ objectMap["contentType"] = dsi.ContentType
+ }
+ return json.Marshal(objectMap)
+}
+
+// DeletedSecretListResult the deleted secret list result
+type DeletedSecretListResult struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; A response message containing a list of the deleted secrets in the vault along with a link to the next page of deleted secrets
+ Value *[]DeletedSecretItem `json:"value,omitempty"`
+ // NextLink - READ-ONLY; The URL to get the next set of deleted secrets.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for DeletedSecretListResult.
+func (dslr DeletedSecretListResult) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// DeletedSecretListResultIterator provides access to a complete listing of DeletedSecretItem values.
+type DeletedSecretListResultIterator struct {
+ i int
+ page DeletedSecretListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *DeletedSecretListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeletedSecretListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *DeletedSecretListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter DeletedSecretListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter DeletedSecretListResultIterator) Response() DeletedSecretListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter DeletedSecretListResultIterator) Value() DeletedSecretItem { + if !iter.page.NotDone() { + return DeletedSecretItem{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the DeletedSecretListResultIterator type. +func NewDeletedSecretListResultIterator(page DeletedSecretListResultPage) DeletedSecretListResultIterator { + return DeletedSecretListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (dslr DeletedSecretListResult) IsEmpty() bool { + return dslr.Value == nil || len(*dslr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (dslr DeletedSecretListResult) hasNextLink() bool { + return dslr.NextLink != nil && len(*dslr.NextLink) != 0 +} + +// deletedSecretListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (dslr DeletedSecretListResult) deletedSecretListResultPreparer(ctx context.Context) (*http.Request, error) { + if !dslr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(dslr.NextLink))) +} + +// DeletedSecretListResultPage contains a page of DeletedSecretItem values. +type DeletedSecretListResultPage struct { + fn func(context.Context, DeletedSecretListResult) (DeletedSecretListResult, error) + dslr DeletedSecretListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *DeletedSecretListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeletedSecretListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.dslr) + if err != nil { + return err + } + page.dslr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *DeletedSecretListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DeletedSecretListResultPage) NotDone() bool { + return !page.dslr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DeletedSecretListResultPage) Response() DeletedSecretListResult { + return page.dslr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DeletedSecretListResultPage) Values() []DeletedSecretItem { + if page.dslr.IsEmpty() { + return nil + } + return *page.dslr.Value +} + +// Creates a new instance of the DeletedSecretListResultPage type. +func NewDeletedSecretListResultPage(cur DeletedSecretListResult, getNextPage func(context.Context, DeletedSecretListResult) (DeletedSecretListResult, error)) DeletedSecretListResultPage { + return DeletedSecretListResultPage{ + fn: getNextPage, + dslr: cur, + } +} + +// DeletedStorageAccountItem the deleted storage account item containing metadata about the deleted storage +// account. +type DeletedStorageAccountItem struct { + // RecoveryID - The url of the recovery object, used to identify and recover the deleted storage account. + RecoveryID *string `json:"recoveryId,omitempty"` + // ScheduledPurgeDate - READ-ONLY; The time when the storage account is scheduled to be purged, in UTC + ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` + // DeletedDate - READ-ONLY; The time when the storage account was deleted, in UTC + DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` + // ID - READ-ONLY; Storage identifier. + ID *string `json:"id,omitempty"` + // ResourceID - READ-ONLY; Storage account resource Id. + ResourceID *string `json:"resourceId,omitempty"` + // Attributes - READ-ONLY; The storage account management attributes. + Attributes *StorageAccountAttributes `json:"attributes,omitempty"` + // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for DeletedStorageAccountItem. +func (dsai DeletedStorageAccountItem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dsai.RecoveryID != nil { + objectMap["recoveryId"] = dsai.RecoveryID + } + return json.Marshal(objectMap) +} + +// DeletedStorageBundle a deleted storage account bundle consisting of its previous id, attributes and its +// tags, as well as information on when it will be purged. +type DeletedStorageBundle struct { + autorest.Response `json:"-"` + // RecoveryID - The url of the recovery object, used to identify and recover the deleted storage account. 
+ RecoveryID *string `json:"recoveryId,omitempty"` + // ScheduledPurgeDate - READ-ONLY; The time when the storage account is scheduled to be purged, in UTC + ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` + // DeletedDate - READ-ONLY; The time when the storage account was deleted, in UTC + DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` + // ID - READ-ONLY; The storage account id. + ID *string `json:"id,omitempty"` + // ResourceID - READ-ONLY; The storage account resource id. + ResourceID *string `json:"resourceId,omitempty"` + // ActiveKeyName - READ-ONLY; The current active storage account key name. + ActiveKeyName *string `json:"activeKeyName,omitempty"` + // AutoRegenerateKey - READ-ONLY; whether keyvault should manage the storage account for the user. + AutoRegenerateKey *bool `json:"autoRegenerateKey,omitempty"` + // RegenerationPeriod - READ-ONLY; The key regeneration time duration specified in ISO-8601 format. + RegenerationPeriod *string `json:"regenerationPeriod,omitempty"` + // Attributes - READ-ONLY; The storage account attributes. + Attributes *StorageAccountAttributes `json:"attributes,omitempty"` + // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for DeletedStorageBundle. +func (dsb DeletedStorageBundle) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dsb.RecoveryID != nil { + objectMap["recoveryId"] = dsb.RecoveryID + } + return json.Marshal(objectMap) +} + +// DeletedStorageListResult the deleted storage account list result +type DeletedStorageListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; A response message containing a list of the deleted storage accounts in the vault along with a link to the next page of deleted storage accounts + Value *[]DeletedStorageAccountItem `json:"value,omitempty"` + // NextLink - READ-ONLY; The URL to get the next set of deleted storage accounts. + NextLink *string `json:"nextLink,omitempty"` +} + +// MarshalJSON is the custom marshaler for DeletedStorageListResult. +func (dslr DeletedStorageListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// DeletedStorageListResultIterator provides access to a complete listing of DeletedStorageAccountItem +// values. +type DeletedStorageListResultIterator struct { + i int + page DeletedStorageListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *DeletedStorageListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeletedStorageListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. 
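+//
+// Next simply delegates to NextWithContext with context.Background(), so it
+// cannot be cancelled or carry deadlines; prefer NextWithContext in new code.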
+func (iter *DeletedStorageListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter DeletedStorageListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter DeletedStorageListResultIterator) Response() DeletedStorageListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter DeletedStorageListResultIterator) Value() DeletedStorageAccountItem { + if !iter.page.NotDone() { + return DeletedStorageAccountItem{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the DeletedStorageListResultIterator type. +func NewDeletedStorageListResultIterator(page DeletedStorageListResultPage) DeletedStorageListResultIterator { + return DeletedStorageListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (dslr DeletedStorageListResult) IsEmpty() bool { + return dslr.Value == nil || len(*dslr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (dslr DeletedStorageListResult) hasNextLink() bool { + return dslr.NextLink != nil && len(*dslr.NextLink) != 0 +} + +// deletedStorageListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (dslr DeletedStorageListResult) deletedStorageListResultPreparer(ctx context.Context) (*http.Request, error) { + if !dslr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(dslr.NextLink))) +} + +// DeletedStorageListResultPage contains a page of DeletedStorageAccountItem values. +type DeletedStorageListResultPage struct { + fn func(context.Context, DeletedStorageListResult) (DeletedStorageListResult, error) + dslr DeletedStorageListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *DeletedStorageListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeletedStorageListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.dslr) + if err != nil { + return err + } + page.dslr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *DeletedStorageListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DeletedStorageListResultPage) NotDone() bool { + return !page.dslr.IsEmpty() +} + +// Response returns the raw server response from the last page request. 
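+//
+// The returned result embeds autorest.Response, so the raw *http.Response
+// (when present) is reachable for status inspection, e.g.:
+//
+//	if raw := page.Response().Response.Response; raw != nil {
+//		log.Printf("last page status: %d", raw.StatusCode)
+//	}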
+func (page DeletedStorageListResultPage) Response() DeletedStorageListResult { + return page.dslr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DeletedStorageListResultPage) Values() []DeletedStorageAccountItem { + if page.dslr.IsEmpty() { + return nil + } + return *page.dslr.Value +} + +// Creates a new instance of the DeletedStorageListResultPage type. +func NewDeletedStorageListResultPage(cur DeletedStorageListResult, getNextPage func(context.Context, DeletedStorageListResult) (DeletedStorageListResult, error)) DeletedStorageListResultPage { + return DeletedStorageListResultPage{ + fn: getNextPage, + dslr: cur, + } +} + +// Error the key vault server error. +type Error struct { + // Code - READ-ONLY; The error code. + Code *string `json:"code,omitempty"` + // Message - READ-ONLY; The error message. + Message *string `json:"message,omitempty"` + // InnerError - READ-ONLY + InnerError *Error `json:"innererror,omitempty"` +} + +// MarshalJSON is the custom marshaler for Error. +func (e Error) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// ErrorType the key vault error exception. +type ErrorType struct { + // Error - READ-ONLY + Error *Error `json:"error,omitempty"` +} + +// MarshalJSON is the custom marshaler for ErrorType. +func (et ErrorType) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// IssuerAttributes the attributes of an issuer managed by the Key Vault service. +type IssuerAttributes struct { + // Enabled - Determines whether the issuer is enabled. + Enabled *bool `json:"enabled,omitempty"` + // Created - READ-ONLY; Creation time in UTC. + Created *date.UnixTime `json:"created,omitempty"` + // Updated - READ-ONLY; Last updated time in UTC. + Updated *date.UnixTime `json:"updated,omitempty"` +} + +// MarshalJSON is the custom marshaler for IssuerAttributes. +func (ia IssuerAttributes) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ia.Enabled != nil { + objectMap["enabled"] = ia.Enabled + } + return json.Marshal(objectMap) +} + +// IssuerBundle the issuer for Key Vault certificate. +type IssuerBundle struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; Identifier for the issuer object. + ID *string `json:"id,omitempty"` + // Provider - The issuer provider. + Provider *string `json:"provider,omitempty"` + // Credentials - The credentials to be used for the issuer. + Credentials *IssuerCredentials `json:"credentials,omitempty"` + // OrganizationDetails - Details of the organization as provided to the issuer. + OrganizationDetails *OrganizationDetails `json:"org_details,omitempty"` + // Attributes - Attributes of the issuer object. + Attributes *IssuerAttributes `json:"attributes,omitempty"` +} + +// MarshalJSON is the custom marshaler for IssuerBundle. +func (ib IssuerBundle) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ib.Provider != nil { + objectMap["provider"] = ib.Provider + } + if ib.Credentials != nil { + objectMap["credentials"] = ib.Credentials + } + if ib.OrganizationDetails != nil { + objectMap["org_details"] = ib.OrganizationDetails + } + if ib.Attributes != nil { + objectMap["attributes"] = ib.Attributes + } + return json.Marshal(objectMap) +} + +// IssuerCredentials the credentials to be used for the certificate issuer. 
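+//
+// On the wire the credentials use the short JSON names from the struct tags,
+// e.g. (illustrative values):
+//
+//	{"account_id":"storage-account-1","pwd":"<secret>"}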
+type IssuerCredentials struct { + // AccountID - The user name/account name/account id. + AccountID *string `json:"account_id,omitempty"` + // Password - The password/secret/account key. + Password *string `json:"pwd,omitempty"` +} + +// IssuerParameters parameters for the issuer of the X509 component of a certificate. +type IssuerParameters struct { + // Name - Name of the referenced issuer object or reserved names; for example, 'Self' or 'Unknown'. + Name *string `json:"name,omitempty"` + // CertificateType - Certificate type as supported by the provider (optional); for example 'OV-SSL', 'EV-SSL' + CertificateType *string `json:"cty,omitempty"` + // CertificateTransparency - Indicates if the certificates generated under this policy should be published to certificate transparency logs. + CertificateTransparency *bool `json:"cert_transparency,omitempty"` +} + +// JSONWebKey as of http://tools.ietf.org/html/draft-ietf-jose-json-web-key-18 +type JSONWebKey struct { + // Kid - Key identifier. + Kid *string `json:"kid,omitempty"` + // Kty - JsonWebKey Key Type (kty), as defined in https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40. Possible values include: 'EC', 'ECHSM', 'RSA', 'RSAHSM', 'Oct' + Kty JSONWebKeyType `json:"kty,omitempty"` + KeyOps *[]string `json:"key_ops,omitempty"` + // N - RSA modulus. (a URL-encoded base64 string) + N *string `json:"n,omitempty"` + // E - RSA public exponent. (a URL-encoded base64 string) + E *string `json:"e,omitempty"` + // D - RSA private exponent, or the D component of an EC private key. (a URL-encoded base64 string) + D *string `json:"d,omitempty"` + // DP - RSA private key parameter. (a URL-encoded base64 string) + DP *string `json:"dp,omitempty"` + // DQ - RSA private key parameter. (a URL-encoded base64 string) + DQ *string `json:"dq,omitempty"` + // QI - RSA private key parameter. (a URL-encoded base64 string) + QI *string `json:"qi,omitempty"` + // P - RSA secret prime. (a URL-encoded base64 string) + P *string `json:"p,omitempty"` + // Q - RSA secret prime, with p < q. (a URL-encoded base64 string) + Q *string `json:"q,omitempty"` + // K - Symmetric key. (a URL-encoded base64 string) + K *string `json:"k,omitempty"` + // T - HSM Token, used with 'Bring Your Own Key'. (a URL-encoded base64 string) + T *string `json:"key_hsm,omitempty"` + // Crv - Elliptic curve name. For valid values, see JsonWebKeyCurveName. Possible values include: 'P256', 'P384', 'P521', 'P256K' + Crv JSONWebKeyCurveName `json:"crv,omitempty"` + // X - X component of an EC public key. (a URL-encoded base64 string) + X *string `json:"x,omitempty"` + // Y - Y component of an EC public key. (a URL-encoded base64 string) + Y *string `json:"y,omitempty"` +} + +// KeyAttributes the attributes of a key managed by the key vault service. +type KeyAttributes struct { + // RecoverableDays - READ-ONLY; softDelete data retention days. Value should be >=7 and <=90 when softDelete enabled, otherwise 0. + RecoverableDays *int32 `json:"recoverableDays,omitempty"` + // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for keys in the current vault. If it contains 'Purgeable' the key can be permanently deleted by a privileged user; otherwise, only the system can purge the key, at the end of the retention interval. 
Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription', 'CustomizedRecoverablePurgeable', 'CustomizedRecoverable', 'CustomizedRecoverableProtectedSubscription' + RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"` + // Enabled - Determines whether the object is enabled. + Enabled *bool `json:"enabled,omitempty"` + // NotBefore - Not before date in UTC. + NotBefore *date.UnixTime `json:"nbf,omitempty"` + // Expires - Expiry date in UTC. + Expires *date.UnixTime `json:"exp,omitempty"` + // Created - READ-ONLY; Creation time in UTC. + Created *date.UnixTime `json:"created,omitempty"` + // Updated - READ-ONLY; Last updated time in UTC. + Updated *date.UnixTime `json:"updated,omitempty"` +} + +// MarshalJSON is the custom marshaler for KeyAttributes. +func (ka KeyAttributes) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ka.Enabled != nil { + objectMap["enabled"] = ka.Enabled + } + if ka.NotBefore != nil { + objectMap["nbf"] = ka.NotBefore + } + if ka.Expires != nil { + objectMap["exp"] = ka.Expires + } + return json.Marshal(objectMap) +} + +// KeyBundle a KeyBundle consisting of a WebKey plus its attributes. +type KeyBundle struct { + autorest.Response `json:"-"` + // Key - The Json web key. + Key *JSONWebKey `json:"key,omitempty"` + // Attributes - The key management attributes. + Attributes *KeyAttributes `json:"attributes,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` + // Managed - READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. + Managed *bool `json:"managed,omitempty"` +} + +// MarshalJSON is the custom marshaler for KeyBundle. +func (kb KeyBundle) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if kb.Key != nil { + objectMap["key"] = kb.Key + } + if kb.Attributes != nil { + objectMap["attributes"] = kb.Attributes + } + if kb.Tags != nil { + objectMap["tags"] = kb.Tags + } + return json.Marshal(objectMap) +} + +// KeyCreateParameters the key create parameters. +type KeyCreateParameters struct { + // Kty - The type of key to create. For valid values, see JsonWebKeyType. Possible values include: 'EC', 'ECHSM', 'RSA', 'RSAHSM', 'Oct' + Kty JSONWebKeyType `json:"kty,omitempty"` + // KeySize - The key size in bits. For example: 2048, 3072, or 4096 for RSA. + KeySize *int32 `json:"key_size,omitempty"` + KeyOps *[]JSONWebKeyOperation `json:"key_ops,omitempty"` + KeyAttributes *KeyAttributes `json:"attributes,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` + // Curve - Elliptic curve name. For valid values, see JsonWebKeyCurveName. Possible values include: 'P256', 'P384', 'P521', 'P256K' + Curve JSONWebKeyCurveName `json:"crv,omitempty"` +} + +// MarshalJSON is the custom marshaler for KeyCreateParameters. 
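+//
+// Enum-typed fields (Kty, Curve) are compared against the empty string
+// rather than nil, since they are value types; pointer and map fields use
+// nil checks. A create request for a 2048-bit RSA key would marshal to
+// (illustrative output):
+//
+//	{"key_size":2048,"kty":"RSA"}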
+func (kcp KeyCreateParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if kcp.Kty != "" { + objectMap["kty"] = kcp.Kty + } + if kcp.KeySize != nil { + objectMap["key_size"] = kcp.KeySize + } + if kcp.KeyOps != nil { + objectMap["key_ops"] = kcp.KeyOps + } + if kcp.KeyAttributes != nil { + objectMap["attributes"] = kcp.KeyAttributes + } + if kcp.Tags != nil { + objectMap["tags"] = kcp.Tags + } + if kcp.Curve != "" { + objectMap["crv"] = kcp.Curve + } + return json.Marshal(objectMap) +} + +// KeyImportParameters the key import parameters. +type KeyImportParameters struct { + // Hsm - Whether to import as a hardware key (HSM) or software key. + Hsm *bool `json:"Hsm,omitempty"` + // Key - The Json web key + Key *JSONWebKey `json:"key,omitempty"` + // KeyAttributes - The key management attributes. + KeyAttributes *KeyAttributes `json:"attributes,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for KeyImportParameters. +func (kip KeyImportParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if kip.Hsm != nil { + objectMap["Hsm"] = kip.Hsm + } + if kip.Key != nil { + objectMap["key"] = kip.Key + } + if kip.KeyAttributes != nil { + objectMap["attributes"] = kip.KeyAttributes + } + if kip.Tags != nil { + objectMap["tags"] = kip.Tags + } + return json.Marshal(objectMap) +} + +// KeyItem the key item containing key metadata. +type KeyItem struct { + // Kid - Key identifier. + Kid *string `json:"kid,omitempty"` + // Attributes - The key management attributes. + Attributes *KeyAttributes `json:"attributes,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` + // Managed - READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. + Managed *bool `json:"managed,omitempty"` +} + +// MarshalJSON is the custom marshaler for KeyItem. +func (ki KeyItem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ki.Kid != nil { + objectMap["kid"] = ki.Kid + } + if ki.Attributes != nil { + objectMap["attributes"] = ki.Attributes + } + if ki.Tags != nil { + objectMap["tags"] = ki.Tags + } + return json.Marshal(objectMap) +} + +// KeyListResult the key list result. +type KeyListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; A response message containing a list of keys in the key vault along with a link to the next page of keys. + Value *[]KeyItem `json:"value,omitempty"` + // NextLink - READ-ONLY; The URL to get the next set of keys. + NextLink *string `json:"nextLink,omitempty"` +} + +// MarshalJSON is the custom marshaler for KeyListResult. +func (klr KeyListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// KeyListResultIterator provides access to a complete listing of KeyItem values. +type KeyListResultIterator struct { + i int + page KeyListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
+func (iter *KeyListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/KeyListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *KeyListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter KeyListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter KeyListResultIterator) Response() KeyListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter KeyListResultIterator) Value() KeyItem { + if !iter.page.NotDone() { + return KeyItem{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the KeyListResultIterator type. +func NewKeyListResultIterator(page KeyListResultPage) KeyListResultIterator { + return KeyListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (klr KeyListResult) IsEmpty() bool { + return klr.Value == nil || len(*klr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (klr KeyListResult) hasNextLink() bool { + return klr.NextLink != nil && len(*klr.NextLink) != 0 +} + +// keyListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (klr KeyListResult) keyListResultPreparer(ctx context.Context) (*http.Request, error) { + if !klr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(klr.NextLink))) +} + +// KeyListResultPage contains a page of KeyItem values. +type KeyListResultPage struct { + fn func(context.Context, KeyListResult) (KeyListResult, error) + klr KeyListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *KeyListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/KeyListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.klr) + if err != nil { + return err + } + page.klr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. 
+func (page *KeyListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page KeyListResultPage) NotDone() bool { + return !page.klr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page KeyListResultPage) Response() KeyListResult { + return page.klr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page KeyListResultPage) Values() []KeyItem { + if page.klr.IsEmpty() { + return nil + } + return *page.klr.Value +} + +// Creates a new instance of the KeyListResultPage type. +func NewKeyListResultPage(cur KeyListResult, getNextPage func(context.Context, KeyListResult) (KeyListResult, error)) KeyListResultPage { + return KeyListResultPage{ + fn: getNextPage, + klr: cur, + } +} + +// KeyOperationResult the key operation result. +type KeyOperationResult struct { + autorest.Response `json:"-"` + // Kid - READ-ONLY; Key identifier + Kid *string `json:"kid,omitempty"` + // Result - READ-ONLY; a URL-encoded base64 string + Result *string `json:"value,omitempty"` +} + +// MarshalJSON is the custom marshaler for KeyOperationResult. +func (kor KeyOperationResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// KeyOperationsParameters the key operations parameters. +type KeyOperationsParameters struct { + // Algorithm - algorithm identifier. Possible values include: 'RSAOAEP', 'RSAOAEP256', 'RSA15' + Algorithm JSONWebKeyEncryptionAlgorithm `json:"alg,omitempty"` + // Value - a URL-encoded base64 string + Value *string `json:"value,omitempty"` +} + +// KeyProperties properties of the key pair backing a certificate. +type KeyProperties struct { + // Exportable - Indicates if the private key can be exported. + Exportable *bool `json:"exportable,omitempty"` + // KeyType - The type of key pair to be used for the certificate. Possible values include: 'EC', 'ECHSM', 'RSA', 'RSAHSM', 'Oct' + KeyType JSONWebKeyType `json:"kty,omitempty"` + // KeySize - The key size in bits. For example: 2048, 3072, or 4096 for RSA. + KeySize *int32 `json:"key_size,omitempty"` + // ReuseKey - Indicates if the same key pair will be used on certificate renewal. + ReuseKey *bool `json:"reuse_key,omitempty"` + // Curve - Elliptic curve name. For valid values, see JsonWebKeyCurveName. Possible values include: 'P256', 'P384', 'P521', 'P256K' + Curve JSONWebKeyCurveName `json:"crv,omitempty"` +} + +// KeyRestoreParameters the key restore parameters. +type KeyRestoreParameters struct { + // KeyBundleBackup - The backup blob associated with a key bundle. (a URL-encoded base64 string) + KeyBundleBackup *string `json:"value,omitempty"` +} + +// KeySignParameters the key operations parameters. +type KeySignParameters struct { + // Algorithm - The signing/verification algorithm identifier. For more information on possible algorithm types, see JsonWebKeySignatureAlgorithm. Possible values include: 'PS256', 'PS384', 'PS512', 'RS256', 'RS384', 'RS512', 'RSNULL', 'ES256', 'ES384', 'ES512', 'ES256K' + Algorithm JSONWebKeySignatureAlgorithm `json:"alg,omitempty"` + // Value - a URL-encoded base64 string + Value *string `json:"value,omitempty"` +} + +// KeyUpdateParameters the key update parameters. +type KeyUpdateParameters struct { + // KeyOps - Json web key operations. For more information on possible key operations, see JsonWebKeyOperation. 
+ KeyOps *[]JSONWebKeyOperation `json:"key_ops,omitempty"` + KeyAttributes *KeyAttributes `json:"attributes,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for KeyUpdateParameters. +func (kup KeyUpdateParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if kup.KeyOps != nil { + objectMap["key_ops"] = kup.KeyOps + } + if kup.KeyAttributes != nil { + objectMap["attributes"] = kup.KeyAttributes + } + if kup.Tags != nil { + objectMap["tags"] = kup.Tags + } + return json.Marshal(objectMap) +} + +// KeyVerifyParameters the key verify parameters. +type KeyVerifyParameters struct { + // Algorithm - The signing/verification algorithm. For more information on possible algorithm types, see JsonWebKeySignatureAlgorithm. Possible values include: 'PS256', 'PS384', 'PS512', 'RS256', 'RS384', 'RS512', 'RSNULL', 'ES256', 'ES384', 'ES512', 'ES256K' + Algorithm JSONWebKeySignatureAlgorithm `json:"alg,omitempty"` + // Digest - The digest used for signing. (a URL-encoded base64 string) + Digest *string `json:"digest,omitempty"` + // Signature - The signature to be verified. (a URL-encoded base64 string) + Signature *string `json:"value,omitempty"` +} + +// KeyVerifyResult the key verify result. +type KeyVerifyResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; True if the signature is verified, otherwise false. + Value *bool `json:"value,omitempty"` +} + +// MarshalJSON is the custom marshaler for KeyVerifyResult. +func (kvr KeyVerifyResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// LifetimeAction action and its trigger that will be performed by Key Vault over the lifetime of a +// certificate. +type LifetimeAction struct { + // Trigger - The condition that will execute the action. + Trigger *Trigger `json:"trigger,omitempty"` + // Action - The action that will be executed. + Action *Action `json:"action,omitempty"` +} + +// OrganizationDetails details of the organization of the certificate issuer. +type OrganizationDetails struct { + // ID - Id of the organization. + ID *string `json:"id,omitempty"` + // AdminDetails - Details of the organization administrator. + AdminDetails *[]AdministratorDetails `json:"admin_details,omitempty"` +} + +// PendingCertificateSigningRequestResult the pending certificate signing request result. +type PendingCertificateSigningRequestResult struct { + // Value - READ-ONLY; The pending certificate signing request as Base64 encoded string. + Value *string `json:"value,omitempty"` +} + +// MarshalJSON is the custom marshaler for PendingCertificateSigningRequestResult. +func (pcsrr PendingCertificateSigningRequestResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// SasDefinitionAttributes the SAS definition management attributes. +type SasDefinitionAttributes struct { + // Enabled - the enabled state of the object. + Enabled *bool `json:"enabled,omitempty"` + // Created - READ-ONLY; Creation time in UTC. + Created *date.UnixTime `json:"created,omitempty"` + // Updated - READ-ONLY; Last updated time in UTC. + Updated *date.UnixTime `json:"updated,omitempty"` + // RecoverableDays - READ-ONLY; softDelete data retention days. Value should be >=7 and <=90 when softDelete enabled, otherwise 0. 
+ RecoverableDays *int32 `json:"recoverableDays,omitempty"` + // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for SAS definitions in the current vault. If it contains 'Purgeable' the SAS definition can be permanently deleted by a privileged user; otherwise, only the system can purge the SAS definition, at the end of the retention interval. Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription', 'CustomizedRecoverablePurgeable', 'CustomizedRecoverable', 'CustomizedRecoverableProtectedSubscription' + RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"` +} + +// MarshalJSON is the custom marshaler for SasDefinitionAttributes. +func (sda SasDefinitionAttributes) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if sda.Enabled != nil { + objectMap["enabled"] = sda.Enabled + } + return json.Marshal(objectMap) +} + +// SasDefinitionBundle a SAS definition bundle consists of key vault SAS definition details plus its +// attributes. +type SasDefinitionBundle struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; The SAS definition id. + ID *string `json:"id,omitempty"` + // SecretID - READ-ONLY; Storage account SAS definition secret id. + SecretID *string `json:"sid,omitempty"` + // TemplateURI - READ-ONLY; The SAS definition token template signed with an arbitrary key. Tokens created according to the SAS definition will have the same properties as the template. + TemplateURI *string `json:"templateUri,omitempty"` + // SasType - READ-ONLY; The type of SAS token the SAS definition will create. Possible values include: 'Account', 'Service' + SasType SasTokenType `json:"sasType,omitempty"` + // ValidityPeriod - READ-ONLY; The validity period of SAS tokens created according to the SAS definition. + ValidityPeriod *string `json:"validityPeriod,omitempty"` + // Attributes - READ-ONLY; The SAS definition attributes. + Attributes *SasDefinitionAttributes `json:"attributes,omitempty"` + // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for SasDefinitionBundle. +func (sdb SasDefinitionBundle) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// SasDefinitionCreateParameters the SAS definition create parameters. +type SasDefinitionCreateParameters struct { + // TemplateURI - The SAS definition token template signed with an arbitrary key. Tokens created according to the SAS definition will have the same properties as the template. + TemplateURI *string `json:"templateUri,omitempty"` + // SasType - The type of SAS token the SAS definition will create. Possible values include: 'Account', 'Service' + SasType SasTokenType `json:"sasType,omitempty"` + // ValidityPeriod - The validity period of SAS tokens created according to the SAS definition. + ValidityPeriod *string `json:"validityPeriod,omitempty"` + // SasDefinitionAttributes - The attributes of the SAS definition. + SasDefinitionAttributes *SasDefinitionAttributes `json:"attributes,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for SasDefinitionCreateParameters. 
+func (sdcp SasDefinitionCreateParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if sdcp.TemplateURI != nil { + objectMap["templateUri"] = sdcp.TemplateURI + } + if sdcp.SasType != "" { + objectMap["sasType"] = sdcp.SasType + } + if sdcp.ValidityPeriod != nil { + objectMap["validityPeriod"] = sdcp.ValidityPeriod + } + if sdcp.SasDefinitionAttributes != nil { + objectMap["attributes"] = sdcp.SasDefinitionAttributes + } + if sdcp.Tags != nil { + objectMap["tags"] = sdcp.Tags + } + return json.Marshal(objectMap) +} + +// SasDefinitionItem the SAS definition item containing storage SAS definition metadata. +type SasDefinitionItem struct { + // ID - READ-ONLY; The storage SAS identifier. + ID *string `json:"id,omitempty"` + // SecretID - READ-ONLY; The storage account SAS definition secret id. + SecretID *string `json:"sid,omitempty"` + // Attributes - READ-ONLY; The SAS definition management attributes. + Attributes *SasDefinitionAttributes `json:"attributes,omitempty"` + // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for SasDefinitionItem. +func (sdi SasDefinitionItem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// SasDefinitionListResult the storage account SAS definition list result. +type SasDefinitionListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; A response message containing a list of SAS definitions along with a link to the next page of SAS definitions. + Value *[]SasDefinitionItem `json:"value,omitempty"` + // NextLink - READ-ONLY; The URL to get the next set of SAS definitions. + NextLink *string `json:"nextLink,omitempty"` +} + +// MarshalJSON is the custom marshaler for SasDefinitionListResult. +func (sdlr SasDefinitionListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// SasDefinitionListResultIterator provides access to a complete listing of SasDefinitionItem values. +type SasDefinitionListResultIterator struct { + i int + page SasDefinitionListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *SasDefinitionListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SasDefinitionListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *SasDefinitionListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter SasDefinitionListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. 
+func (iter SasDefinitionListResultIterator) Response() SasDefinitionListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter SasDefinitionListResultIterator) Value() SasDefinitionItem { + if !iter.page.NotDone() { + return SasDefinitionItem{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the SasDefinitionListResultIterator type. +func NewSasDefinitionListResultIterator(page SasDefinitionListResultPage) SasDefinitionListResultIterator { + return SasDefinitionListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (sdlr SasDefinitionListResult) IsEmpty() bool { + return sdlr.Value == nil || len(*sdlr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (sdlr SasDefinitionListResult) hasNextLink() bool { + return sdlr.NextLink != nil && len(*sdlr.NextLink) != 0 +} + +// sasDefinitionListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (sdlr SasDefinitionListResult) sasDefinitionListResultPreparer(ctx context.Context) (*http.Request, error) { + if !sdlr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(sdlr.NextLink))) +} + +// SasDefinitionListResultPage contains a page of SasDefinitionItem values. +type SasDefinitionListResultPage struct { + fn func(context.Context, SasDefinitionListResult) (SasDefinitionListResult, error) + sdlr SasDefinitionListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *SasDefinitionListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SasDefinitionListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.sdlr) + if err != nil { + return err + } + page.sdlr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *SasDefinitionListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page SasDefinitionListResultPage) NotDone() bool { + return !page.sdlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page SasDefinitionListResultPage) Response() SasDefinitionListResult { + return page.sdlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page SasDefinitionListResultPage) Values() []SasDefinitionItem { + if page.sdlr.IsEmpty() { + return nil + } + return *page.sdlr.Value +} + +// Creates a new instance of the SasDefinitionListResultPage type. 
+func NewSasDefinitionListResultPage(cur SasDefinitionListResult, getNextPage func(context.Context, SasDefinitionListResult) (SasDefinitionListResult, error)) SasDefinitionListResultPage { + return SasDefinitionListResultPage{ + fn: getNextPage, + sdlr: cur, + } +} + +// SasDefinitionUpdateParameters the SAS definition update parameters. +type SasDefinitionUpdateParameters struct { + // TemplateURI - The SAS definition token template signed with an arbitrary key. Tokens created according to the SAS definition will have the same properties as the template. + TemplateURI *string `json:"templateUri,omitempty"` + // SasType - The type of SAS token the SAS definition will create. Possible values include: 'Account', 'Service' + SasType SasTokenType `json:"sasType,omitempty"` + // ValidityPeriod - The validity period of SAS tokens created according to the SAS definition. + ValidityPeriod *string `json:"validityPeriod,omitempty"` + // SasDefinitionAttributes - The attributes of the SAS definition. + SasDefinitionAttributes *SasDefinitionAttributes `json:"attributes,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for SasDefinitionUpdateParameters. +func (sdup SasDefinitionUpdateParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if sdup.TemplateURI != nil { + objectMap["templateUri"] = sdup.TemplateURI + } + if sdup.SasType != "" { + objectMap["sasType"] = sdup.SasType + } + if sdup.ValidityPeriod != nil { + objectMap["validityPeriod"] = sdup.ValidityPeriod + } + if sdup.SasDefinitionAttributes != nil { + objectMap["attributes"] = sdup.SasDefinitionAttributes + } + if sdup.Tags != nil { + objectMap["tags"] = sdup.Tags + } + return json.Marshal(objectMap) +} + +// SecretAttributes the secret management attributes. +type SecretAttributes struct { + // RecoverableDays - READ-ONLY; softDelete data retention days. Value should be >=7 and <=90 when softDelete enabled, otherwise 0. + RecoverableDays *int32 `json:"recoverableDays,omitempty"` + // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for secrets in the current vault. If it contains 'Purgeable', the secret can be permanently deleted by a privileged user; otherwise, only the system can purge the secret, at the end of the retention interval. Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription', 'CustomizedRecoverablePurgeable', 'CustomizedRecoverable', 'CustomizedRecoverableProtectedSubscription' + RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"` + // Enabled - Determines whether the object is enabled. + Enabled *bool `json:"enabled,omitempty"` + // NotBefore - Not before date in UTC. + NotBefore *date.UnixTime `json:"nbf,omitempty"` + // Expires - Expiry date in UTC. + Expires *date.UnixTime `json:"exp,omitempty"` + // Created - READ-ONLY; Creation time in UTC. + Created *date.UnixTime `json:"created,omitempty"` + // Updated - READ-ONLY; Last updated time in UTC. + Updated *date.UnixTime `json:"updated,omitempty"` +} + +// MarshalJSON is the custom marshaler for SecretAttributes. 
+func (sa SecretAttributes) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if sa.Enabled != nil { + objectMap["enabled"] = sa.Enabled + } + if sa.NotBefore != nil { + objectMap["nbf"] = sa.NotBefore + } + if sa.Expires != nil { + objectMap["exp"] = sa.Expires + } + return json.Marshal(objectMap) +} + +// SecretBundle a secret consisting of a value, id and its attributes. +type SecretBundle struct { + autorest.Response `json:"-"` + // Value - The secret value. + Value *string `json:"value,omitempty"` + // ID - The secret id. + ID *string `json:"id,omitempty"` + // ContentType - The content type of the secret. + ContentType *string `json:"contentType,omitempty"` + // Attributes - The secret management attributes. + Attributes *SecretAttributes `json:"attributes,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` + // Kid - READ-ONLY; If this is a secret backing a KV certificate, then this field specifies the corresponding key backing the KV certificate. + Kid *string `json:"kid,omitempty"` + // Managed - READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a secret backing a certificate, then managed will be true. + Managed *bool `json:"managed,omitempty"` +} + +// MarshalJSON is the custom marshaler for SecretBundle. +func (sb SecretBundle) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if sb.Value != nil { + objectMap["value"] = sb.Value + } + if sb.ID != nil { + objectMap["id"] = sb.ID + } + if sb.ContentType != nil { + objectMap["contentType"] = sb.ContentType + } + if sb.Attributes != nil { + objectMap["attributes"] = sb.Attributes + } + if sb.Tags != nil { + objectMap["tags"] = sb.Tags + } + return json.Marshal(objectMap) +} + +// SecretItem the secret item containing secret metadata. +type SecretItem struct { + // ID - Secret identifier. + ID *string `json:"id,omitempty"` + // Attributes - The secret management attributes. + Attributes *SecretAttributes `json:"attributes,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` + // ContentType - Type of the secret value such as a password. + ContentType *string `json:"contentType,omitempty"` + // Managed - READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. + Managed *bool `json:"managed,omitempty"` +} + +// MarshalJSON is the custom marshaler for SecretItem. +func (si SecretItem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if si.ID != nil { + objectMap["id"] = si.ID + } + if si.Attributes != nil { + objectMap["attributes"] = si.Attributes + } + if si.Tags != nil { + objectMap["tags"] = si.Tags + } + if si.ContentType != nil { + objectMap["contentType"] = si.ContentType + } + return json.Marshal(objectMap) +} + +// SecretListResult the secret list result. +type SecretListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; A response message containing a list of secrets in the key vault along with a link to the next page of secrets. + Value *[]SecretItem `json:"value,omitempty"` + // NextLink - READ-ONLY; The URL to get the next set of secrets. + NextLink *string `json:"nextLink,omitempty"` +} + +// MarshalJSON is the custom marshaler for SecretListResult. 
+func (slr SecretListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// SecretListResultIterator provides access to a complete listing of SecretItem values. +type SecretListResultIterator struct { + i int + page SecretListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *SecretListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SecretListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *SecretListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter SecretListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter SecretListResultIterator) Response() SecretListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter SecretListResultIterator) Value() SecretItem { + if !iter.page.NotDone() { + return SecretItem{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the SecretListResultIterator type. +func NewSecretListResultIterator(page SecretListResultPage) SecretListResultIterator { + return SecretListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (slr SecretListResult) IsEmpty() bool { + return slr.Value == nil || len(*slr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (slr SecretListResult) hasNextLink() bool { + return slr.NextLink != nil && len(*slr.NextLink) != 0 +} + +// secretListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (slr SecretListResult) secretListResultPreparer(ctx context.Context) (*http.Request, error) { + if !slr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(slr.NextLink))) +} + +// SecretListResultPage contains a page of SecretItem values. +type SecretListResultPage struct { + fn func(context.Context, SecretListResult) (SecretListResult, error) + slr SecretListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *SecretListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SecretListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.slr) + if err != nil { + return err + } + page.slr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *SecretListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page SecretListResultPage) NotDone() bool { + return !page.slr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page SecretListResultPage) Response() SecretListResult { + return page.slr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page SecretListResultPage) Values() []SecretItem { + if page.slr.IsEmpty() { + return nil + } + return *page.slr.Value +} + +// Creates a new instance of the SecretListResultPage type. +func NewSecretListResultPage(cur SecretListResult, getNextPage func(context.Context, SecretListResult) (SecretListResult, error)) SecretListResultPage { + return SecretListResultPage{ + fn: getNextPage, + slr: cur, + } +} + +// SecretProperties properties of the key backing a certificate. +type SecretProperties struct { + // ContentType - The media type (MIME type). + ContentType *string `json:"contentType,omitempty"` +} + +// SecretRestoreParameters the secret restore parameters. +type SecretRestoreParameters struct { + // SecretBundleBackup - The backup blob associated with a secret bundle. (a URL-encoded base64 string) + SecretBundleBackup *string `json:"value,omitempty"` +} + +// SecretSetParameters the secret set parameters. +type SecretSetParameters struct { + // Value - The value of the secret. + Value *string `json:"value,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` + // ContentType - Type of the secret value such as a password. + ContentType *string `json:"contentType,omitempty"` + // SecretAttributes - The secret management attributes. + SecretAttributes *SecretAttributes `json:"attributes,omitempty"` +} + +// MarshalJSON is the custom marshaler for SecretSetParameters. +func (ssp SecretSetParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ssp.Value != nil { + objectMap["value"] = ssp.Value + } + if ssp.Tags != nil { + objectMap["tags"] = ssp.Tags + } + if ssp.ContentType != nil { + objectMap["contentType"] = ssp.ContentType + } + if ssp.SecretAttributes != nil { + objectMap["attributes"] = ssp.SecretAttributes + } + return json.Marshal(objectMap) +} + +// SecretUpdateParameters the secret update parameters. +type SecretUpdateParameters struct { + // ContentType - Type of the secret value such as a password. + ContentType *string `json:"contentType,omitempty"` + // SecretAttributes - The secret management attributes. 
+ SecretAttributes *SecretAttributes `json:"attributes,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for SecretUpdateParameters. +func (sup SecretUpdateParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if sup.ContentType != nil { + objectMap["contentType"] = sup.ContentType + } + if sup.SecretAttributes != nil { + objectMap["attributes"] = sup.SecretAttributes + } + if sup.Tags != nil { + objectMap["tags"] = sup.Tags + } + return json.Marshal(objectMap) +} + +// StorageAccountAttributes the storage account management attributes. +type StorageAccountAttributes struct { + // Enabled - the enabled state of the object. + Enabled *bool `json:"enabled,omitempty"` + // Created - READ-ONLY; Creation time in UTC. + Created *date.UnixTime `json:"created,omitempty"` + // Updated - READ-ONLY; Last updated time in UTC. + Updated *date.UnixTime `json:"updated,omitempty"` + // RecoverableDays - READ-ONLY; softDelete data retention days. Value should be >=7 and <=90 when softDelete enabled, otherwise 0. + RecoverableDays *int32 `json:"recoverableDays,omitempty"` + // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for storage accounts in the current vault. If it contains 'Purgeable' the storage account can be permanently deleted by a privileged user; otherwise, only the system can purge the storage account, at the end of the retention interval. Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription', 'CustomizedRecoverablePurgeable', 'CustomizedRecoverable', 'CustomizedRecoverableProtectedSubscription' + RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"` +} + +// MarshalJSON is the custom marshaler for StorageAccountAttributes. +func (saa StorageAccountAttributes) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if saa.Enabled != nil { + objectMap["enabled"] = saa.Enabled + } + return json.Marshal(objectMap) +} + +// StorageAccountCreateParameters the storage account create parameters. +type StorageAccountCreateParameters struct { + // ResourceID - Storage account resource id. + ResourceID *string `json:"resourceId,omitempty"` + // ActiveKeyName - Current active storage account key name. + ActiveKeyName *string `json:"activeKeyName,omitempty"` + // AutoRegenerateKey - whether keyvault should manage the storage account for the user. + AutoRegenerateKey *bool `json:"autoRegenerateKey,omitempty"` + // RegenerationPeriod - The key regeneration time duration specified in ISO-8601 format. + RegenerationPeriod *string `json:"regenerationPeriod,omitempty"` + // StorageAccountAttributes - The attributes of the storage account. + StorageAccountAttributes *StorageAccountAttributes `json:"attributes,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for StorageAccountCreateParameters. 
+func (sacp StorageAccountCreateParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if sacp.ResourceID != nil { + objectMap["resourceId"] = sacp.ResourceID + } + if sacp.ActiveKeyName != nil { + objectMap["activeKeyName"] = sacp.ActiveKeyName + } + if sacp.AutoRegenerateKey != nil { + objectMap["autoRegenerateKey"] = sacp.AutoRegenerateKey + } + if sacp.RegenerationPeriod != nil { + objectMap["regenerationPeriod"] = sacp.RegenerationPeriod + } + if sacp.StorageAccountAttributes != nil { + objectMap["attributes"] = sacp.StorageAccountAttributes + } + if sacp.Tags != nil { + objectMap["tags"] = sacp.Tags + } + return json.Marshal(objectMap) +} + +// StorageAccountItem the storage account item containing storage account metadata. +type StorageAccountItem struct { + // ID - READ-ONLY; Storage identifier. + ID *string `json:"id,omitempty"` + // ResourceID - READ-ONLY; Storage account resource Id. + ResourceID *string `json:"resourceId,omitempty"` + // Attributes - READ-ONLY; The storage account management attributes. + Attributes *StorageAccountAttributes `json:"attributes,omitempty"` + // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for StorageAccountItem. +func (sai StorageAccountItem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// StorageAccountRegenerteKeyParameters the storage account key regenerate parameters. +type StorageAccountRegenerteKeyParameters struct { + // KeyName - The storage account key name. + KeyName *string `json:"keyName,omitempty"` +} + +// StorageAccountUpdateParameters the storage account update parameters. +type StorageAccountUpdateParameters struct { + // ActiveKeyName - The current active storage account key name. + ActiveKeyName *string `json:"activeKeyName,omitempty"` + // AutoRegenerateKey - whether keyvault should manage the storage account for the user. + AutoRegenerateKey *bool `json:"autoRegenerateKey,omitempty"` + // RegenerationPeriod - The key regeneration time duration specified in ISO-8601 format. + RegenerationPeriod *string `json:"regenerationPeriod,omitempty"` + // StorageAccountAttributes - The attributes of the storage account. + StorageAccountAttributes *StorageAccountAttributes `json:"attributes,omitempty"` + // Tags - Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for StorageAccountUpdateParameters. +func (saup StorageAccountUpdateParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if saup.ActiveKeyName != nil { + objectMap["activeKeyName"] = saup.ActiveKeyName + } + if saup.AutoRegenerateKey != nil { + objectMap["autoRegenerateKey"] = saup.AutoRegenerateKey + } + if saup.RegenerationPeriod != nil { + objectMap["regenerationPeriod"] = saup.RegenerationPeriod + } + if saup.StorageAccountAttributes != nil { + objectMap["attributes"] = saup.StorageAccountAttributes + } + if saup.Tags != nil { + objectMap["tags"] = saup.Tags + } + return json.Marshal(objectMap) +} + +// StorageBundle a Storage account bundle consists of key vault storage account details plus its +// attributes. +type StorageBundle struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; The storage account id. + ID *string `json:"id,omitempty"` + // ResourceID - READ-ONLY; The storage account resource id. 
+ ResourceID *string `json:"resourceId,omitempty"` + // ActiveKeyName - READ-ONLY; The current active storage account key name. + ActiveKeyName *string `json:"activeKeyName,omitempty"` + // AutoRegenerateKey - READ-ONLY; whether keyvault should manage the storage account for the user. + AutoRegenerateKey *bool `json:"autoRegenerateKey,omitempty"` + // RegenerationPeriod - READ-ONLY; The key regeneration time duration specified in ISO-8601 format. + RegenerationPeriod *string `json:"regenerationPeriod,omitempty"` + // Attributes - READ-ONLY; The storage account attributes. + Attributes *StorageAccountAttributes `json:"attributes,omitempty"` + // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for StorageBundle. +func (sb StorageBundle) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// StorageListResult the storage accounts list result. +type StorageListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; A response message containing a list of storage accounts in the key vault along with a link to the next page of storage accounts. + Value *[]StorageAccountItem `json:"value,omitempty"` + // NextLink - READ-ONLY; The URL to get the next set of storage accounts. + NextLink *string `json:"nextLink,omitempty"` +} + +// MarshalJSON is the custom marshaler for StorageListResult. +func (slr StorageListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// StorageListResultIterator provides access to a complete listing of StorageAccountItem values. +type StorageListResultIterator struct { + i int + page StorageListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *StorageListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StorageListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *StorageListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter StorageListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter StorageListResultIterator) Response() StorageListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. 
+func (iter StorageListResultIterator) Value() StorageAccountItem { + if !iter.page.NotDone() { + return StorageAccountItem{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the StorageListResultIterator type. +func NewStorageListResultIterator(page StorageListResultPage) StorageListResultIterator { + return StorageListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (slr StorageListResult) IsEmpty() bool { + return slr.Value == nil || len(*slr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (slr StorageListResult) hasNextLink() bool { + return slr.NextLink != nil && len(*slr.NextLink) != 0 +} + +// storageListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (slr StorageListResult) storageListResultPreparer(ctx context.Context) (*http.Request, error) { + if !slr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(slr.NextLink))) +} + +// StorageListResultPage contains a page of StorageAccountItem values. +type StorageListResultPage struct { + fn func(context.Context, StorageListResult) (StorageListResult, error) + slr StorageListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *StorageListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StorageListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.slr) + if err != nil { + return err + } + page.slr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *StorageListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page StorageListResultPage) NotDone() bool { + return !page.slr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page StorageListResultPage) Response() StorageListResult { + return page.slr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page StorageListResultPage) Values() []StorageAccountItem { + if page.slr.IsEmpty() { + return nil + } + return *page.slr.Value +} + +// Creates a new instance of the StorageListResultPage type. +func NewStorageListResultPage(cur StorageListResult, getNextPage func(context.Context, StorageListResult) (StorageListResult, error)) StorageListResultPage { + return StorageListResultPage{ + fn: getNextPage, + slr: cur, + } +} + +// StorageRestoreParameters the secret restore parameters. +type StorageRestoreParameters struct { + // StorageBundleBackup - The backup blob associated with a storage account. 
(a URL-encoded base64 string)
+ StorageBundleBackup *string `json:"value,omitempty"`
+}
+
+// SubjectAlternativeNames the subject alternate names of an X509 object.
+type SubjectAlternativeNames struct {
+ // Emails - Email addresses.
+ Emails *[]string `json:"emails,omitempty"`
+ // DNSNames - Domain names.
+ DNSNames *[]string `json:"dns_names,omitempty"`
+ // Upns - User principal names.
+ Upns *[]string `json:"upns,omitempty"`
+}
+
+// Trigger a condition to be satisfied for an action to be executed.
+type Trigger struct {
+ // LifetimePercentage - Percentage of lifetime at which to trigger. Value should be between 1 and 99.
+ LifetimePercentage *int32 `json:"lifetime_percentage,omitempty"`
+ // DaysBeforeExpiry - Days before expiry to attempt renewal. Value should be between 1 and validity_in_months multiplied by 27. If validity_in_months is 36, then value should be between 1 and 972 (36 * 27).
+ DaysBeforeExpiry *int32 `json:"days_before_expiry,omitempty"`
+}
+
+// X509CertificateProperties properties of the X509 component of a certificate.
+type X509CertificateProperties struct {
+ // Subject - The subject name. Should be a valid X509 distinguished name.
+ Subject *string `json:"subject,omitempty"`
+ // Ekus - The enhanced key usage.
+ Ekus *[]string `json:"ekus,omitempty"`
+ // SubjectAlternativeNames - The subject alternative names.
+ SubjectAlternativeNames *SubjectAlternativeNames `json:"sans,omitempty"`
+ // KeyUsage - List of key usages.
+ KeyUsage *[]KeyUsageType `json:"key_usage,omitempty"`
+ // ValidityInMonths - The duration that the certificate is valid in months.
+ ValidityInMonths *int32 `json:"validity_months,omitempty"`
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/version.go
new file mode 100644
index 00000000000..60143005f3e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/version.go
@@ -0,0 +1,19 @@
+package keyvault
+
+import "github.com/Azure/azure-sdk-for-go/version"
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+ return "Azure-SDK-For-Go/" + Version() + " keyvault/7.1"
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+ return version.Number
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE
new file mode 100644
index 00000000000..b9d6a27ea92
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/convert.go b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go new file mode 100644 index 00000000000..86694bd2555 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go @@ -0,0 +1,152 @@ +/* +Package to provides helpers to ease working with pointer values of marshalled structures. +*/ +package to + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// String returns a string value for the passed string pointer. It returns the empty string if the
+// pointer is nil.
+func String(s *string) string {
+ if s != nil {
+ return *s
+ }
+ return ""
+}
+
+// StringPtr returns a pointer to the passed string.
+func StringPtr(s string) *string {
+ return &s
+}
+
+// StringSlice returns a string slice value for the passed string slice pointer. It returns a nil
+// slice if the pointer is nil.
+func StringSlice(s *[]string) []string {
+ if s != nil {
+ return *s
+ }
+ return nil
+}
+
+// StringSlicePtr returns a pointer to the passed string slice.
+func StringSlicePtr(s []string) *[]string {
+ return &s
+}
+
+// StringMap returns a map of strings built from the map of string pointers. The empty string is
+// used for nil pointers.
+func StringMap(msp map[string]*string) map[string]string {
+ ms := make(map[string]string, len(msp))
+ for k, sp := range msp {
+ if sp != nil {
+ ms[k] = *sp
+ } else {
+ ms[k] = ""
+ }
+ }
+ return ms
+}
+
+// StringMapPtr returns a pointer to a map of string pointers built from the passed map of strings.
+func StringMapPtr(ms map[string]string) *map[string]*string {
+ msp := make(map[string]*string, len(ms))
+ for k, s := range ms {
+ msp[k] = StringPtr(s)
+ }
+ return &msp
+}
+
+// Bool returns a bool value for the passed bool pointer. It returns false if the pointer is nil.
+func Bool(b *bool) bool {
+ if b != nil {
+ return *b
+ }
+ return false
+}
+
+// BoolPtr returns a pointer to the passed bool.
+func BoolPtr(b bool) *bool {
+ return &b
+}
+
+// Int returns an int value for the passed int pointer. It returns 0 if the pointer is nil.
+func Int(i *int) int {
+ if i != nil {
+ return *i
+ }
+ return 0
+}
+
+// IntPtr returns a pointer to the passed int.
+func IntPtr(i int) *int {
+ return &i
+}
+
+// Int32 returns an int32 value for the passed int32 pointer. It returns 0 if the pointer is nil.
+func Int32(i *int32) int32 {
+ if i != nil {
+ return *i
+ }
+ return 0
+}
+
+// Int32Ptr returns a pointer to the passed int32.
+func Int32Ptr(i int32) *int32 {
+ return &i
+}
+
+// Int64 returns an int64 value for the passed int64 pointer. It returns 0 if the pointer is nil.
+func Int64(i *int64) int64 {
+ if i != nil {
+ return *i
+ }
+ return 0
+}
+
+// Int64Ptr returns a pointer to the passed int64.
+func Int64Ptr(i int64) *int64 {
+ return &i
+}
+
+// Float32 returns a float32 value for the passed float32 pointer. It returns 0.0 if the pointer is nil.
+func Float32(i *float32) float32 {
+ if i != nil {
+ return *i
+ }
+ return 0.0
+}
+
+// Float32Ptr returns a pointer to the passed float32.
+func Float32Ptr(i float32) *float32 {
+ return &i
+}
+
+// Float64 returns a float64 value for the passed float64 pointer. It returns 0.0 if the pointer is nil.
+func Float64(i *float64) float64 {
+ if i != nil {
+ return *i
+ }
+ return 0.0
+}
+
+// Float64Ptr returns a pointer to the passed float64.
+func Float64Ptr(i float64) *float64 {
+ return &i
+}
+
+// ByteSlicePtr returns a pointer to the passed byte slice.
+func ByteSlicePtr(b []byte) *[]byte { + return &b +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go new file mode 100644 index 00000000000..b7310f6b868 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package to + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Microsoft Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/error.go b/vendor/github.com/Azure/go-autorest/autorest/validation/error.go
new file mode 100644
index 00000000000..fed156dbf6e
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/error.go
@@ -0,0 +1,48 @@
+package validation
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+)
+
+// Error is the type that's returned when the validation of an API's argument constraints fails.
+type Error struct {
+	// PackageType is the package type of the object emitting the error. For types, the value
+	// matches that produced by the '%T' format specifier of the fmt package. For other elements,
+	// such as functions, it is just the package name (e.g., "autorest").
+	PackageType string
+
+	// Method is the name of the method raising the error.
+	Method string
+
+	// Message is the error message.
+	Message string
+}
+
+// Error returns a string containing the details of the validation failure.
+func (e Error) Error() string {
+	return fmt.Sprintf("%s#%s: Invalid input: %s", e.PackageType, e.Method, e.Message)
+}
+
+// NewError creates a new Error object with the specified parameters.
+// message is treated as a format string to which the optional args apply.
+func NewError(packageType string, method string, message string, args ...interface{}) Error {
+	return Error{
+		PackageType: packageType,
+		Method:      method,
+		Message:     fmt.Sprintf(message, args...),
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go
new file mode 100644
index 00000000000..cf1436291a7
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go
@@ -0,0 +1,24 @@
+// +build modhack
+
+package validation
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file, and the github.com/Azure/go-autorest import, won't actually become part of
+// the resultant binary.
+
+// Necessary for safely adding multi-module repo.
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
+import _ "github.com/Azure/go-autorest"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go
new file mode 100644
index 00000000000..ff41cfe0796
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go
@@ -0,0 +1,406 @@
+/*
+Package validation provides methods for validating parameter values using reflection.
+*/
+package validation
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+	"reflect"
+	"regexp"
+	"strings"
+)
+
+// Disabled controls if parameter validation should be globally disabled. The default is false.
+var Disabled bool
+
+// Constraint stores the constraint name, the target field name,
+// the rule, and any chained validations.
+type Constraint struct {
+
+	// Target field name for validation.
+	Target string
+
+	// Constraint name e.g. minLength, MaxLength, Pattern, etc.
+	Name string
+
+	// Rule for constraint e.g. greater than 10, less than 5 etc.
+ Rule interface{} + + // Chain Validations for struct type + Chain []Constraint +} + +// Validation stores parameter-wise validation. +type Validation struct { + TargetValue interface{} + Constraints []Constraint +} + +// Constraint list +const ( + Empty = "Empty" + Null = "Null" + ReadOnly = "ReadOnly" + Pattern = "Pattern" + MaxLength = "MaxLength" + MinLength = "MinLength" + MaxItems = "MaxItems" + MinItems = "MinItems" + MultipleOf = "MultipleOf" + UniqueItems = "UniqueItems" + InclusiveMaximum = "InclusiveMaximum" + ExclusiveMaximum = "ExclusiveMaximum" + ExclusiveMinimum = "ExclusiveMinimum" + InclusiveMinimum = "InclusiveMinimum" +) + +// Validate method validates constraints on parameter +// passed in validation array. +func Validate(m []Validation) error { + if Disabled { + return nil + } + for _, item := range m { + v := reflect.ValueOf(item.TargetValue) + for _, constraint := range item.Constraints { + var err error + switch v.Kind() { + case reflect.Ptr: + err = validatePtr(v, constraint) + case reflect.String: + err = validateString(v, constraint) + case reflect.Struct: + err = validateStruct(v, constraint) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + err = validateInt(v, constraint) + case reflect.Float32, reflect.Float64: + err = validateFloat(v, constraint) + case reflect.Array, reflect.Slice, reflect.Map: + err = validateArrayMap(v, constraint) + default: + err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind())) + } + + if err != nil { + return err + } + } + } + return nil +} + +func validateStruct(x reflect.Value, v Constraint, name ...string) error { + //Get field name from target name which is in format a.b.c + s := strings.Split(v.Target, ".") + f := x.FieldByName(s[len(s)-1]) + if isZero(f) { + return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.Target)) + } + + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(f), + Constraints: []Constraint{v}, + }, + }) +} + +func validatePtr(x reflect.Value, v Constraint) error { + if v.Name == ReadOnly { + if !x.IsNil() { + return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request") + } + return nil + } + if x.IsNil() { + return checkNil(x, v) + } + if v.Chain != nil { + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(x.Elem()), + Constraints: v.Chain, + }, + }) + } + return nil +} + +func validateInt(x reflect.Value, v Constraint) error { + i := x.Int() + r, ok := toInt64(v.Rule) + if !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) + } + switch v.Name { + case MultipleOf: + if i%r != 0 { + return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r)) + } + case ExclusiveMinimum: + if i <= r { + return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) + } + case ExclusiveMaximum: + if i >= r { + return createError(x, v, fmt.Sprintf("value must be less than %v", r)) + } + case InclusiveMinimum: + if i < r { + return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) + } + case InclusiveMaximum: + if i > r { + return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) + } + default: + return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.Name)) + } + return nil +} + +func validateFloat(x reflect.Value, v Constraint) error { + f := x.Float() + r, ok := v.Rule.(float64) + if !ok { + return createError(x, v, 
fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.Name, v.Rule)) + } + switch v.Name { + case ExclusiveMinimum: + if f <= r { + return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) + } + case ExclusiveMaximum: + if f >= r { + return createError(x, v, fmt.Sprintf("value must be less than %v", r)) + } + case InclusiveMinimum: + if f < r { + return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) + } + case InclusiveMaximum: + if f > r { + return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) + } + default: + return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.Name)) + } + return nil +} + +func validateString(x reflect.Value, v Constraint) error { + s := x.String() + switch v.Name { + case Empty: + if len(s) == 0 { + return checkEmpty(x, v) + } + case Pattern: + reg, err := regexp.Compile(v.Rule.(string)) + if err != nil { + return createError(x, v, err.Error()) + } + if !reg.MatchString(s) { + return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.Rule)) + } + case MaxLength: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) + } + if len(s) > v.Rule.(int) { + return createError(x, v, fmt.Sprintf("value length must be less than or equal to %v", v.Rule)) + } + case MinLength: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) + } + if len(s) < v.Rule.(int) { + return createError(x, v, fmt.Sprintf("value length must be greater than or equal to %v", v.Rule)) + } + case ReadOnly: + if len(s) > 0 { + return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request") + } + default: + return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.Name)) + } + + if v.Chain != nil { + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(x), + Constraints: v.Chain, + }, + }) + } + return nil +} + +func validateArrayMap(x reflect.Value, v Constraint) error { + switch v.Name { + case Null: + if x.IsNil() { + return checkNil(x, v) + } + case Empty: + if x.IsNil() || x.Len() == 0 { + return checkEmpty(x, v) + } + case MaxItems: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) + } + if x.Len() > v.Rule.(int) { + return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.Rule, x.Len())) + } + case MinItems: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) + } + if x.Len() < v.Rule.(int) { + return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.Rule, x.Len())) + } + case UniqueItems: + if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { + if !checkForUniqueInArray(x) { + return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) + } + } else if x.Kind() == reflect.Map { + if !checkForUniqueInMap(x) { + return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) + } + } else { + return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.Name, x.Kind())) + } + case ReadOnly: + if x.Len() != 0 { + return createError(x, v, "readonly parameter; 
must send as nil or empty in request") + } + case Pattern: + reg, err := regexp.Compile(v.Rule.(string)) + if err != nil { + return createError(x, v, err.Error()) + } + keys := x.MapKeys() + for _, k := range keys { + if !reg.MatchString(k.String()) { + return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.Rule)) + } + } + default: + return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.Name)) + } + + if v.Chain != nil { + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(x), + Constraints: v.Chain, + }, + }) + } + return nil +} + +func checkNil(x reflect.Value, v Constraint) error { + if _, ok := v.Rule.(bool); !ok { + return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) + } + if v.Rule.(bool) { + return createError(x, v, "value can not be null; required parameter") + } + return nil +} + +func checkEmpty(x reflect.Value, v Constraint) error { + if _, ok := v.Rule.(bool); !ok { + return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) + } + + if v.Rule.(bool) { + return createError(x, v, "value can not be null or empty; required parameter") + } + return nil +} + +func checkForUniqueInArray(x reflect.Value) bool { + if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { + return false + } + arrOfInterface := make([]interface{}, x.Len()) + + for i := 0; i < x.Len(); i++ { + arrOfInterface[i] = x.Index(i).Interface() + } + + m := make(map[interface{}]bool) + for _, val := range arrOfInterface { + if m[val] { + return false + } + m[val] = true + } + return true +} + +func checkForUniqueInMap(x reflect.Value) bool { + if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { + return false + } + mapOfInterface := make(map[interface{}]interface{}, x.Len()) + + keys := x.MapKeys() + for _, k := range keys { + mapOfInterface[k.Interface()] = x.MapIndex(k).Interface() + } + + m := make(map[interface{}]bool) + for _, val := range mapOfInterface { + if m[val] { + return false + } + m[val] = true + } + return true +} + +func getInterfaceValue(x reflect.Value) interface{} { + if x.Kind() == reflect.Invalid { + return nil + } + return x.Interface() +} + +func isZero(x interface{}) bool { + return x == reflect.Zero(reflect.TypeOf(x)).Interface() +} + +func createError(x reflect.Value, v Constraint, err string) error { + return fmt.Errorf("autorest/validation: validation failed: parameter=%s constraint=%s value=%#v details: %s", + v.Target, v.Name, getInterfaceValue(x), err) +} + +func toInt64(v interface{}) (int64, bool) { + if i64, ok := v.(int64); ok { + return i64, true + } + // older generators emit max constants as int, so if int64 fails fall back to int + if i32, ok := v.(int); ok { + return int64(i32), true + } + return 0, false +} diff --git a/vendor/github.com/armon/go-metrics/.gitignore b/vendor/github.com/armon/go-metrics/.gitignore new file mode 100644 index 00000000000..e5750f5720e --- /dev/null +++ b/vendor/github.com/armon/go-metrics/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +/metrics.out + +.idea diff --git a/vendor/github.com/armon/go-metrics/.travis.yml 
b/vendor/github.com/armon/go-metrics/.travis.yml
new file mode 100644
index 00000000000..87d230c8d78
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+go:
+  - "1.x"
+
+env:
+  - GO111MODULE=on
+
+install:
+  - go get ./...
+
+script:
+  - go test ./...
diff --git a/vendor/github.com/armon/go-metrics/LICENSE b/vendor/github.com/armon/go-metrics/LICENSE
new file mode 100644
index 00000000000..106569e542b
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Armon Dadgar
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/armon/go-metrics/README.md b/vendor/github.com/armon/go-metrics/README.md
new file mode 100644
index 00000000000..aa73348c08d
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/README.md
@@ -0,0 +1,91 @@
+go-metrics
+==========
+
+This library provides a `metrics` package which can be used to instrument code,
+expose application metrics, and profile runtime performance in a flexible manner.
+
+Current API: [![GoDoc](https://godoc.org/github.com/armon/go-metrics?status.svg)](https://godoc.org/github.com/armon/go-metrics)
+
+Sinks
+-----
+
+The `metrics` package makes use of a `MetricSink` interface to support delivery
+to any type of backend. Currently the following sinks are provided:
+
+* StatsiteSink : Sinks to a [statsite](https://github.com/armon/statsite/) instance (TCP)
+* StatsdSink: Sinks to a [StatsD](https://github.com/etsy/statsd/) / statsite instance (UDP)
+* PrometheusSink: Sinks to a [Prometheus](http://prometheus.io/) metrics endpoint (exposed via HTTP for scrapes)
+* InmemSink : Provides in-memory aggregation, can be used to export stats
+* FanoutSink : Sinks to multiple sinks. Enables writing to multiple statsite instances, for example.
+* BlackholeSink : Sinks to nowhere
+
+In addition to the sinks, the `InmemSignal` can be used to catch a signal,
+and dump a formatted output of recent metrics. For example, when a process gets
+a SIGUSR1, it can dump to stderr recent performance metrics for debugging.
+
+Labels
+------
+
+Most metrics have an equivalent variant ending in `WithLabels`; these methods
+allow pushing metrics with labels and using features of the underlying sinks
+(e.g., translation into Prometheus labels).
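For instance, a labeled counter might be emitted as follows (a minimal sketch; the key and label values are illustrative):

```go
// Emit a counter tagged with a "method" label; sinks that support
// dimensions (such as Prometheus) translate these into native labels.
metrics.IncrCounterWithLabels(
	[]string{"requests", "handled"},
	1,
	[]metrics.Label{{Name: "method", Value: "GET"}},
)
```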
+
+Since some of these labels may greatly increase the cardinality of metrics, the
+library allows filtering labels using a blacklist/whitelist system which is
+global to all metrics.
+
+* If `Config.AllowedLabels` is not nil, then only labels specified in this value will be sent to the underlying sink; otherwise, all labels are sent by default.
+* If `Config.BlockedLabels` is not nil, any label specified in this value will not be sent to the underlying sinks.
+
+By default, both `Config.AllowedLabels` and `Config.BlockedLabels` are nil, meaning that
+no tags are filtered at all, but a user can still globally block some tags with high
+cardinality at the application level.
+
+Examples
+--------
+
+Here is an example of using the package:
+
+```go
+func SlowMethod() {
+    // Profiling the runtime of a method
+    defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now())
+}
+
+// Configure a statsite sink as the global metrics sink
+sink, _ := metrics.NewStatsiteSink("statsite:8125")
+metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink)
+
+// Emit a Key/Value pair
+metrics.EmitKey([]string{"questions", "meaning of life"}, 42)
+```
+
+Here is an example of setting up a signal handler:
+
+```go
+// Setup the inmem sink and signal handler
+inm := metrics.NewInmemSink(10*time.Second, time.Minute)
+sig := metrics.DefaultInmemSignal(inm)
+metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm)
+
+// Run some code
+inm.SetGauge([]string{"foo"}, 42)
+inm.EmitKey([]string{"bar"}, 30)
+
+inm.IncrCounter([]string{"baz"}, 42)
+inm.IncrCounter([]string{"baz"}, 1)
+inm.IncrCounter([]string{"baz"}, 80)
+
+inm.AddSample([]string{"method", "wow"}, 42)
+inm.AddSample([]string{"method", "wow"}, 100)
+inm.AddSample([]string{"method", "wow"}, 22)
+
+....
+```
+
+When a signal comes in, output like the following will be dumped to stderr:
+
+    [2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000
+    [2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000
+    [2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509
+    [2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513
\ No newline at end of file
diff --git a/vendor/github.com/armon/go-metrics/const_unix.go b/vendor/github.com/armon/go-metrics/const_unix.go
new file mode 100644
index 00000000000..31098dd57e5
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/const_unix.go
@@ -0,0 +1,12 @@
+// +build !windows
+
+package metrics
+
+import (
+	"syscall"
+)
+
+const (
+	// DefaultSignal is used with DefaultInmemSignal
+	DefaultSignal = syscall.SIGUSR1
+)
diff --git a/vendor/github.com/armon/go-metrics/const_windows.go b/vendor/github.com/armon/go-metrics/const_windows.go
new file mode 100644
index 00000000000..38136af3e42
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/const_windows.go
@@ -0,0 +1,13 @@
+// +build windows
+
+package metrics
+
+import (
+	"syscall"
+)
+
+const (
+	// DefaultSignal is used with DefaultInmemSignal
+	// Windows has no SIGUSR1, use SIGBREAK
+	DefaultSignal = syscall.Signal(21)
+)
diff --git a/vendor/github.com/armon/go-metrics/inmem.go b/vendor/github.com/armon/go-metrics/inmem.go
new file mode 100644
index 00000000000..7c427aca979
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/inmem.go
@@ -0,0 +1,339 @@
+package metrics
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"net/url"
+	"strings"
+	"sync"
+	"time"
+)
+
+var spaceReplacer = strings.NewReplacer(" ", "_")
+
+// InmemSink provides a MetricSink that does in-memory aggregation
+// without sending metrics over a network. It can be embedded within
+// an application to provide profiling information.
+type InmemSink struct {
+	// How long is each aggregation interval
+	interval time.Duration
+
+	// Retain controls how many metric intervals we keep
+	retain time.Duration
+
+	// maxIntervals is the maximum length of intervals.
+	// It is retain / interval.
+	maxIntervals int
+
+	// intervals is a slice of the retained intervals
+	intervals    []*IntervalMetrics
+	intervalLock sync.RWMutex
+
+	rateDenom float64
+}
+
+// IntervalMetrics stores the aggregated metrics
+// for a specific interval
+type IntervalMetrics struct {
+	sync.RWMutex
+
+	// The start time of the interval
+	Interval time.Time
+
+	// Gauges maps the key to the last set value
+	Gauges map[string]GaugeValue
+
+	// Points maps the string to the list of emitted values
+	// from EmitKey
+	Points map[string][]float32
+
+	// Counters maps the string key to a sum of the counter
+	// values
+	Counters map[string]SampledValue
+
+	// Samples maps the key to an AggregateSample,
+	// which has the rolled up view of a sample
+	Samples map[string]SampledValue
+
+	// done is closed when this interval has ended, and a new IntervalMetrics
+	// has been created to receive any future metrics.
+	done chan struct{}
+}
+
+// NewIntervalMetrics creates a new IntervalMetrics for a given interval
+func NewIntervalMetrics(intv time.Time) *IntervalMetrics {
+	return &IntervalMetrics{
+		Interval: intv,
+		Gauges:   make(map[string]GaugeValue),
+		Points:   make(map[string][]float32),
+		Counters: make(map[string]SampledValue),
+		Samples:  make(map[string]SampledValue),
+		done:     make(chan struct{}),
+	}
+}
+
+// AggregateSample is used to hold aggregate metrics
+// about a sample
+type AggregateSample struct {
+	Count       int       // The count of emitted pairs
+	Rate        float64   // The values' rate per time unit (usually 1 second)
+	Sum         float64   // The sum of values
+	SumSq       float64   `json:"-"` // The sum of squared values
+	Min         float64   // Minimum value
+	Max         float64   // Maximum value
+	LastUpdated time.Time `json:"-"` // When value was last updated
+}
+
+// Stddev computes the standard deviation of the values
+func (a *AggregateSample) Stddev() float64 {
+	num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2)
+	div := float64(a.Count * (a.Count - 1))
+	if div == 0 {
+		return 0
+	}
+	return math.Sqrt(num / div)
+}
+
+// Mean computes the mean of the values
+func (a *AggregateSample) Mean() float64 {
+	if a.Count == 0 {
+		return 0
+	}
+	return a.Sum / float64(a.Count)
+}
+
+// Ingest is used to update a sample
+func (a *AggregateSample) Ingest(v float64, rateDenom float64) {
+	a.Count++
+	a.Sum += v
+	a.SumSq += (v * v)
+	if v < a.Min || a.Count == 1 {
+		a.Min = v
+	}
+	if v > a.Max || a.Count == 1 {
+		a.Max = v
+	}
+	a.Rate = float64(a.Sum) / rateDenom
+	a.LastUpdated = time.Now()
+}
+
+func (a *AggregateSample) String() string {
+	if a.Count == 0 {
+		return "Count: 0"
+	} else if a.Stddev() == 0 {
+		return fmt.Sprintf("Count: %d Sum: %0.3f LastUpdated: %s", a.Count, a.Sum, a.LastUpdated)
+	} else {
+		return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s",
+			a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated)
+	}
+}
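A minimal, self-contained sketch of how this aggregation behaves (the values are illustrative; the import path assumes the vendored location above):

```go
package main

import (
	"fmt"

	metrics "github.com/armon/go-metrics"
)

func main() {
	// Ingest 1, 2, 3 with a rate denominator of 1 (i.e. per second):
	// Count=3, Sum=6, Min=1, Max=3, so Mean()=2, Stddev()=1, Rate=6.
	var a metrics.AggregateSample
	for _, v := range []float64{1, 2, 3} {
		a.Ingest(v, 1)
	}
	fmt.Printf("%s (mean=%.1f)\n", a.String(), a.Mean())
}
```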
+// NewInmemSinkFromURL creates an InmemSink from a URL. It is used
+// (and tested) from NewMetricSinkFromURL.
+func NewInmemSinkFromURL(u *url.URL) (MetricSink, error) {
+	params := u.Query()
+
+	interval, err := time.ParseDuration(params.Get("interval"))
+	if err != nil {
+		return nil, fmt.Errorf("Bad 'interval' param: %s", err)
+	}
+
+	retain, err := time.ParseDuration(params.Get("retain"))
+	if err != nil {
+		return nil, fmt.Errorf("Bad 'retain' param: %s", err)
+	}
+
+	return NewInmemSink(interval, retain), nil
+}
+
+// NewInmemSink is used to construct a new in-memory sink.
+// Uses an aggregation interval and maximum retention period.
+func NewInmemSink(interval, retain time.Duration) *InmemSink {
+	rateTimeUnit := time.Second
+	i := &InmemSink{
+		interval:     interval,
+		retain:       retain,
+		maxIntervals: int(retain / interval),
+		rateDenom:    float64(interval.Nanoseconds()) / float64(rateTimeUnit.Nanoseconds()),
+	}
+	i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals)
+	return i
+}
+
+func (i *InmemSink) SetGauge(key []string, val float32) {
+	i.SetGaugeWithLabels(key, val, nil)
+}
+
+func (i *InmemSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
+	k, name := i.flattenKeyLabels(key, labels)
+	intv := i.getInterval()
+
+	intv.Lock()
+	defer intv.Unlock()
+	intv.Gauges[k] = GaugeValue{Name: name, Value: val, Labels: labels}
+}
+
+func (i *InmemSink) EmitKey(key []string, val float32) {
+	k := i.flattenKey(key)
+	intv := i.getInterval()
+
+	intv.Lock()
+	defer intv.Unlock()
+	vals := intv.Points[k]
+	intv.Points[k] = append(vals, val)
+}
+
+func (i *InmemSink) IncrCounter(key []string, val float32) {
+	i.IncrCounterWithLabels(key, val, nil)
+}
+
+func (i *InmemSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
+	k, name := i.flattenKeyLabels(key, labels)
+	intv := i.getInterval()
+
+	intv.Lock()
+	defer intv.Unlock()
+
+	agg, ok := intv.Counters[k]
+	if !ok {
+		agg = SampledValue{
+			Name:            name,
+			AggregateSample: &AggregateSample{},
+			Labels:          labels,
+		}
+		intv.Counters[k] = agg
+	}
+	agg.Ingest(float64(val), i.rateDenom)
+}
+
+func (i *InmemSink) AddSample(key []string, val float32) {
+	i.AddSampleWithLabels(key, val, nil)
+}
+
+func (i *InmemSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
+	k, name := i.flattenKeyLabels(key, labels)
+	intv := i.getInterval()
+
+	intv.Lock()
+	defer intv.Unlock()
+
+	agg, ok := intv.Samples[k]
+	if !ok {
+		agg = SampledValue{
+			Name:            name,
+			AggregateSample: &AggregateSample{},
+			Labels:          labels,
+		}
+		intv.Samples[k] = agg
+	}
+	agg.Ingest(float64(val), i.rateDenom)
+}
+
+// Data is used to retrieve all the aggregated metrics.
+// Intervals may be in use, and a read lock should be acquired
+func (i *InmemSink) Data() []*IntervalMetrics {
+	// Get the current interval, forces creation
+	i.getInterval()
+
+	i.intervalLock.RLock()
+	defer i.intervalLock.RUnlock()
+
+	n := len(i.intervals)
+	intervals := make([]*IntervalMetrics, n)
+
+	copy(intervals[:n-1], i.intervals[:n-1])
+	current := i.intervals[n-1]
+
+	// make a copy of the current interval
+	intervals[n-1] = &IntervalMetrics{}
+	copyCurrent := intervals[n-1]
+	current.RLock()
+	*copyCurrent = *current
+	// RWMutex is not safe to copy, so create a new instance on the copy
+	copyCurrent.RWMutex = sync.RWMutex{}
+
+	copyCurrent.Gauges = make(map[string]GaugeValue, len(current.Gauges))
+	for k, v := range current.Gauges {
+		copyCurrent.Gauges[k] = v
+	}
+	// saved values will not change, so copying the reference is enough
+	copyCurrent.Points = make(map[string][]float32, len(current.Points))
+	for k, v := range current.Points {
+		copyCurrent.Points[k] = v
+ } + copyCurrent.Counters = make(map[string]SampledValue, len(current.Counters)) + for k, v := range current.Counters { + copyCurrent.Counters[k] = v.deepCopy() + } + copyCurrent.Samples = make(map[string]SampledValue, len(current.Samples)) + for k, v := range current.Samples { + copyCurrent.Samples[k] = v.deepCopy() + } + current.RUnlock() + + return intervals +} + +// getInterval returns the current interval. A new interval is created if no +// previous interval exists, or if the current time is beyond the window for the +// current interval. +func (i *InmemSink) getInterval() *IntervalMetrics { + intv := time.Now().Truncate(i.interval) + + // Attempt to return the existing interval first, because it only requires + // a read lock. + i.intervalLock.RLock() + n := len(i.intervals) + if n > 0 && i.intervals[n-1].Interval == intv { + defer i.intervalLock.RUnlock() + return i.intervals[n-1] + } + i.intervalLock.RUnlock() + + i.intervalLock.Lock() + defer i.intervalLock.Unlock() + + // Re-check for an existing interval now that the lock is re-acquired. + n = len(i.intervals) + if n > 0 && i.intervals[n-1].Interval == intv { + return i.intervals[n-1] + } + + current := NewIntervalMetrics(intv) + i.intervals = append(i.intervals, current) + if n > 0 { + close(i.intervals[n-1].done) + } + + n++ + // Prune old intervals if the count exceeds the max. + if n >= i.maxIntervals { + copy(i.intervals[0:], i.intervals[n-i.maxIntervals:]) + i.intervals = i.intervals[:i.maxIntervals] + } + return current +} + +// Flattens the key for formatting, removes spaces +func (i *InmemSink) flattenKey(parts []string) string { + buf := &bytes.Buffer{} + + joined := strings.Join(parts, ".") + + spaceReplacer.WriteString(buf, joined) + + return buf.String() +} + +// Flattens the key for formatting along with its labels, removes spaces +func (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) { + key := i.flattenKey(parts) + buf := bytes.NewBufferString(key) + + for _, label := range labels { + spaceReplacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value)) + } + + return buf.String(), key +} diff --git a/vendor/github.com/armon/go-metrics/inmem_endpoint.go b/vendor/github.com/armon/go-metrics/inmem_endpoint.go new file mode 100644 index 00000000000..24eefa96389 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem_endpoint.go @@ -0,0 +1,162 @@ +package metrics + +import ( + "context" + "fmt" + "net/http" + "sort" + "time" +) + +// MetricsSummary holds a roll-up of metrics info for a given interval +type MetricsSummary struct { + Timestamp string + Gauges []GaugeValue + Points []PointValue + Counters []SampledValue + Samples []SampledValue +} + +type GaugeValue struct { + Name string + Hash string `json:"-"` + Value float32 + + Labels []Label `json:"-"` + DisplayLabels map[string]string `json:"Labels"` +} + +type PointValue struct { + Name string + Points []float32 +} + +type SampledValue struct { + Name string + Hash string `json:"-"` + *AggregateSample + Mean float64 + Stddev float64 + + Labels []Label `json:"-"` + DisplayLabels map[string]string `json:"Labels"` +} + +// deepCopy allocates a new instance of AggregateSample +func (source *SampledValue) deepCopy() SampledValue { + dest := *source + if source.AggregateSample != nil { + dest.AggregateSample = &AggregateSample{} + *dest.AggregateSample = *source.AggregateSample + } + return dest +} + +// DisplayMetrics returns a summary of the metrics from the most recent finished interval. 
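A minimal sketch of exposing this endpoint over HTTP (the route, the `inm` sink variable, and the `encoding/json`/`net/http` imports are assumed context, not part of the vendored package):

```go
http.HandleFunc("/debug/metrics", func(w http.ResponseWriter, r *http.Request) {
	// DisplayMetrics returns the summary; it does not write the response itself.
	summary, err := inm.DisplayMetrics(w, r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(summary)
})
```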
+func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + data := i.Data() + + var interval *IntervalMetrics + n := len(data) + switch { + case n == 0: + return nil, fmt.Errorf("no metric intervals have been initialized yet") + case n == 1: + // Show the current interval if it's all we have + interval = data[0] + default: + // Show the most recent finished interval if we have one + interval = data[n-2] + } + + return newMetricSummaryFromInterval(interval), nil +} + +func newMetricSummaryFromInterval(interval *IntervalMetrics) MetricsSummary { + interval.RLock() + defer interval.RUnlock() + + summary := MetricsSummary{ + Timestamp: interval.Interval.Round(time.Second).UTC().String(), + Gauges: make([]GaugeValue, 0, len(interval.Gauges)), + Points: make([]PointValue, 0, len(interval.Points)), + } + + // Format and sort the output of each metric type, so it gets displayed in a + // deterministic order. + for name, points := range interval.Points { + summary.Points = append(summary.Points, PointValue{name, points}) + } + sort.Slice(summary.Points, func(i, j int) bool { + return summary.Points[i].Name < summary.Points[j].Name + }) + + for hash, value := range interval.Gauges { + value.Hash = hash + value.DisplayLabels = make(map[string]string) + for _, label := range value.Labels { + value.DisplayLabels[label.Name] = label.Value + } + value.Labels = nil + + summary.Gauges = append(summary.Gauges, value) + } + sort.Slice(summary.Gauges, func(i, j int) bool { + return summary.Gauges[i].Hash < summary.Gauges[j].Hash + }) + + summary.Counters = formatSamples(interval.Counters) + summary.Samples = formatSamples(interval.Samples) + + return summary +} + +func formatSamples(source map[string]SampledValue) []SampledValue { + output := make([]SampledValue, 0, len(source)) + for hash, sample := range source { + displayLabels := make(map[string]string) + for _, label := range sample.Labels { + displayLabels[label.Name] = label.Value + } + + output = append(output, SampledValue{ + Name: sample.Name, + Hash: hash, + AggregateSample: sample.AggregateSample, + Mean: sample.AggregateSample.Mean(), + Stddev: sample.AggregateSample.Stddev(), + DisplayLabels: displayLabels, + }) + } + sort.Slice(output, func(i, j int) bool { + return output[i].Hash < output[j].Hash + }) + + return output +} + +type Encoder interface { + Encode(interface{}) error +} + +// Stream writes metrics using encoder.Encode each time an interval ends. Runs +// until the request context is cancelled, or the encoder returns an error. +// The caller is responsible for logging any errors from encoder. 
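Since `*json.Encoder` satisfies this `Encoder` interface, interval summaries can be streamed as JSON lines; a minimal sketch (the `inm` sink variable, the timeout, and the `os.Stdout` destination are illustrative):

```go
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
// Blocks, emitting one summary per completed interval, until ctx is done
// or the encoder fails.
inm.Stream(ctx, json.NewEncoder(os.Stdout))
```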
+func (i *InmemSink) Stream(ctx context.Context, encoder Encoder) { + interval := i.getInterval() + + for { + select { + case <-interval.done: + summary := newMetricSummaryFromInterval(interval) + if err := encoder.Encode(summary); err != nil { + return + } + + // update interval to the next one + interval = i.getInterval() + case <-ctx.Done(): + return + } + } +} diff --git a/vendor/github.com/armon/go-metrics/inmem_signal.go b/vendor/github.com/armon/go-metrics/inmem_signal.go new file mode 100644 index 00000000000..0937f4aedf7 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem_signal.go @@ -0,0 +1,117 @@ +package metrics + +import ( + "bytes" + "fmt" + "io" + "os" + "os/signal" + "strings" + "sync" + "syscall" +) + +// InmemSignal is used to listen for a given signal, and when received, +// to dump the current metrics from the InmemSink to an io.Writer +type InmemSignal struct { + signal syscall.Signal + inm *InmemSink + w io.Writer + sigCh chan os.Signal + + stop bool + stopCh chan struct{} + stopLock sync.Mutex +} + +// NewInmemSignal creates a new InmemSignal which listens for a given signal, +// and dumps the current metrics out to a writer +func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal { + i := &InmemSignal{ + signal: sig, + inm: inmem, + w: w, + sigCh: make(chan os.Signal, 1), + stopCh: make(chan struct{}), + } + signal.Notify(i.sigCh, sig) + go i.run() + return i +} + +// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1 +// and writes output to stderr. Windows uses SIGBREAK +func DefaultInmemSignal(inmem *InmemSink) *InmemSignal { + return NewInmemSignal(inmem, DefaultSignal, os.Stderr) +} + +// Stop is used to stop the InmemSignal from listening +func (i *InmemSignal) Stop() { + i.stopLock.Lock() + defer i.stopLock.Unlock() + + if i.stop { + return + } + i.stop = true + close(i.stopCh) + signal.Stop(i.sigCh) +} + +// run is a long running routine that handles signals +func (i *InmemSignal) run() { + for { + select { + case <-i.sigCh: + i.dumpStats() + case <-i.stopCh: + return + } + } +} + +// dumpStats is used to dump the data to output writer +func (i *InmemSignal) dumpStats() { + buf := bytes.NewBuffer(nil) + + data := i.inm.Data() + // Skip the last period which is still being aggregated + for j := 0; j < len(data)-1; j++ { + intv := data[j] + intv.RLock() + for _, val := range intv.Gauges { + name := i.flattenLabels(val.Name, val.Labels) + fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val.Value) + } + for name, vals := range intv.Points { + for _, val := range vals { + fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val) + } + } + for _, agg := range intv.Counters { + name := i.flattenLabels(agg.Name, agg.Labels) + fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg.AggregateSample) + } + for _, agg := range intv.Samples { + name := i.flattenLabels(agg.Name, agg.Labels) + fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg.AggregateSample) + } + intv.RUnlock() + } + + // Write out the bytes + i.w.Write(buf.Bytes()) +} + +// Flattens the key for formatting along with its labels, removes spaces +func (i *InmemSignal) flattenLabels(name string, labels []Label) string { + buf := bytes.NewBufferString(name) + replacer := strings.NewReplacer(" ", "_", ":", "_") + + for _, label := range labels { + replacer.WriteString(buf, ".") + replacer.WriteString(buf, label.Value) + } + + return buf.String() +} diff --git 
a/vendor/github.com/armon/go-metrics/metrics.go b/vendor/github.com/armon/go-metrics/metrics.go new file mode 100644 index 00000000000..36642a42937 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/metrics.go @@ -0,0 +1,299 @@ +package metrics + +import ( + "runtime" + "strings" + "time" + + iradix "github.com/hashicorp/go-immutable-radix" +) + +type Label struct { + Name string + Value string +} + +func (m *Metrics) SetGauge(key []string, val float32) { + m.SetGaugeWithLabels(key, val, nil) +} + +func (m *Metrics) SetGaugeWithLabels(key []string, val float32, labels []Label) { + if m.HostName != "" { + if m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } else if m.EnableHostname { + key = insert(0, m.HostName, key) + } + } + if m.EnableTypePrefix { + key = insert(0, "gauge", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.SetGaugeWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) EmitKey(key []string, val float32) { + if m.EnableTypePrefix { + key = insert(0, "kv", key) + } + if m.ServiceName != "" { + key = insert(0, m.ServiceName, key) + } + allowed, _ := m.allowMetric(key, nil) + if !allowed { + return + } + m.sink.EmitKey(key, val) +} + +func (m *Metrics) IncrCounter(key []string, val float32) { + m.IncrCounterWithLabels(key, val, nil) +} + +func (m *Metrics) IncrCounterWithLabels(key []string, val float32, labels []Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "counter", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.IncrCounterWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) AddSample(key []string, val float32) { + m.AddSampleWithLabels(key, val, nil) +} + +func (m *Metrics) AddSampleWithLabels(key []string, val float32, labels []Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "sample", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.AddSampleWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) MeasureSince(key []string, start time.Time) { + m.MeasureSinceWithLabels(key, start, nil) +} + +func (m *Metrics) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "timer", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + now := time.Now() + elapsed := now.Sub(start) + msec := float32(elapsed.Nanoseconds()) / 
float32(m.TimerGranularity)
+	m.sink.AddSampleWithLabels(key, msec, labelsFiltered)
+}
+
+// UpdateFilter overwrites the existing filter with the given rules.
+func (m *Metrics) UpdateFilter(allow, block []string) {
+	m.UpdateFilterAndLabels(allow, block, m.AllowedLabels, m.BlockedLabels)
+}
+
+// UpdateFilterAndLabels overwrites the existing filter with the given rules.
+func (m *Metrics) UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) {
+	m.filterLock.Lock()
+	defer m.filterLock.Unlock()
+
+	m.AllowedPrefixes = allow
+	m.BlockedPrefixes = block
+
+	if allowedLabels == nil {
+		// Having a whitelist means we take only elements from it
+		m.allowedLabels = nil
+	} else {
+		m.allowedLabels = make(map[string]bool)
+		for _, v := range allowedLabels {
+			m.allowedLabels[v] = true
+		}
+	}
+	m.blockedLabels = make(map[string]bool)
+	for _, v := range blockedLabels {
+		m.blockedLabels[v] = true
+	}
+	m.AllowedLabels = allowedLabels
+	m.BlockedLabels = blockedLabels
+
+	m.filter = iradix.New()
+	for _, prefix := range m.AllowedPrefixes {
+		m.filter, _, _ = m.filter.Insert([]byte(prefix), true)
+	}
+	for _, prefix := range m.BlockedPrefixes {
+		m.filter, _, _ = m.filter.Insert([]byte(prefix), false)
+	}
+}
+
+func (m *Metrics) Shutdown() {
+	if ss, ok := m.sink.(ShutdownSink); ok {
+		ss.Shutdown()
+	}
+}
+
+// labelIsAllowed returns true if the label should be included in the metric.
+// The caller should hold m.filterLock while calling this method.
+func (m *Metrics) labelIsAllowed(label *Label) bool {
+	labelName := (*label).Name
+	if m.blockedLabels != nil {
+		_, ok := m.blockedLabels[labelName]
+		if ok {
+			// If present, let's remove this label
+			return false
+		}
+	}
+	if m.allowedLabels != nil {
+		_, ok := m.allowedLabels[labelName]
+		return ok
+	}
+	// Allow by default
+	return true
+}
+
+// filterLabels returns only the allowed labels.
+// The caller should hold m.filterLock while calling this method.
+func (m *Metrics) filterLabels(labels []Label) []Label {
+	if labels == nil {
+		return nil
+	}
+	toReturn := []Label{}
+	for _, label := range labels {
+		if m.labelIsAllowed(&label) {
+			toReturn = append(toReturn, label)
+		}
+	}
+	return toReturn
+}
+
+// allowMetric returns whether the metric should be allowed based on configured prefix filters.
+// It also returns the applicable labels.
+func (m *Metrics) allowMetric(key []string, labels []Label) (bool, []Label) {
+	m.filterLock.RLock()
+	defer m.filterLock.RUnlock()
+
+	if m.filter == nil || m.filter.Len() == 0 {
+		return m.Config.FilterDefault, m.filterLabels(labels)
+	}
+
+	_, allowed, ok := m.filter.Root().LongestPrefix([]byte(strings.Join(key, ".")))
+	if !ok {
+		return m.Config.FilterDefault, m.filterLabels(labels)
+	}
+
+	return allowed.(bool), m.filterLabels(labels)
+}
+
+// collectStats periodically collects runtime stats to publish.
+func (m *Metrics) collectStats() {
+	for {
+		time.Sleep(m.ProfileInterval)
+		m.EmitRuntimeStats()
+	}
+}
+
+// EmitRuntimeStats emits various runtime statistics.
+func (m *Metrics) EmitRuntimeStats() {
+	// Export number of Goroutines
+	numRoutines := runtime.NumGoroutine()
+	m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines))
+
+	// Export memory stats
+	var stats runtime.MemStats
+	runtime.ReadMemStats(&stats)
+	m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc))
+	m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys))
+	m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs))
+	m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees))
+	m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects))
"heap_objects"}, float32(stats.HeapObjects)) + m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs)) + m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC)) + + // Export info about the last few GC runs + num := stats.NumGC + + // Handle wrap around + if num < m.lastNumGC { + m.lastNumGC = 0 + } + + // Ensure we don't scan more than 256 + if num-m.lastNumGC >= 256 { + m.lastNumGC = num - 255 + } + + for i := m.lastNumGC; i < num; i++ { + pause := stats.PauseNs[i%256] + m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause)) + } + m.lastNumGC = num +} + +// Creates a new slice with the provided string value as the first element +// and the provided slice values as the remaining values. +// Ordering of the values in the provided input slice is kept in tact in the output slice. +func insert(i int, v string, s []string) []string { + // Allocate new slice to avoid modifying the input slice + newS := make([]string, len(s)+1) + + // Copy s[0, i-1] into newS + for j := 0; j < i; j++ { + newS[j] = s[j] + } + + // Insert provided element at index i + newS[i] = v + + // Copy s[i, len(s)-1] into newS starting at newS[i+1] + for j := i; j < len(s); j++ { + newS[j+1] = s[j] + } + + return newS +} diff --git a/vendor/github.com/armon/go-metrics/sink.go b/vendor/github.com/armon/go-metrics/sink.go new file mode 100644 index 00000000000..6f4108ff405 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/sink.go @@ -0,0 +1,132 @@ +package metrics + +import ( + "fmt" + "net/url" +) + +// The MetricSink interface is used to transmit metrics information +// to an external system +type MetricSink interface { + // A Gauge should retain the last value it is set to + SetGauge(key []string, val float32) + SetGaugeWithLabels(key []string, val float32, labels []Label) + + // Should emit a Key/Value pair for each call + EmitKey(key []string, val float32) + + // Counters should accumulate values + IncrCounter(key []string, val float32) + IncrCounterWithLabels(key []string, val float32, labels []Label) + + // Samples are for timing information, where quantiles are used + AddSample(key []string, val float32) + AddSampleWithLabels(key []string, val float32, labels []Label) +} + +type ShutdownSink interface { + MetricSink + + // Shutdown the metric sink, flush metrics to storage, and cleanup resources. + // Called immediately prior to application exit. Implementations must block + // until metrics are flushed to storage. 
+	Shutdown()
+}
+
+// BlackholeSink is used to just blackhole messages
+type BlackholeSink struct{}
+
+func (*BlackholeSink) SetGauge(key []string, val float32)                              {}
+func (*BlackholeSink) SetGaugeWithLabels(key []string, val float32, labels []Label)    {}
+func (*BlackholeSink) EmitKey(key []string, val float32)                               {}
+func (*BlackholeSink) IncrCounter(key []string, val float32)                           {}
+func (*BlackholeSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {}
+func (*BlackholeSink) AddSample(key []string, val float32)                             {}
+func (*BlackholeSink) AddSampleWithLabels(key []string, val float32, labels []Label)   {}
+
+// FanoutSink is used to fan out values to multiple sinks
+type FanoutSink []MetricSink
+
+func (fh FanoutSink) SetGauge(key []string, val float32) {
+	fh.SetGaugeWithLabels(key, val, nil)
+}
+
+func (fh FanoutSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
+	for _, s := range fh {
+		s.SetGaugeWithLabels(key, val, labels)
+	}
+}
+
+func (fh FanoutSink) EmitKey(key []string, val float32) {
+	for _, s := range fh {
+		s.EmitKey(key, val)
+	}
+}
+
+func (fh FanoutSink) IncrCounter(key []string, val float32) {
+	fh.IncrCounterWithLabels(key, val, nil)
+}
+
+func (fh FanoutSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
+	for _, s := range fh {
+		s.IncrCounterWithLabels(key, val, labels)
+	}
+}
+
+func (fh FanoutSink) AddSample(key []string, val float32) {
+	fh.AddSampleWithLabels(key, val, nil)
+}
+
+func (fh FanoutSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
+	for _, s := range fh {
+		s.AddSampleWithLabels(key, val, labels)
+	}
+}
+
+func (fh FanoutSink) Shutdown() {
+	for _, s := range fh {
+		if ss, ok := s.(ShutdownSink); ok {
+			ss.Shutdown()
+		}
+	}
+}
+
+// sinkURLFactoryFunc is a generic interface around the *SinkFromURL() function provided
+// by each sink type
+type sinkURLFactoryFunc func(*url.URL) (MetricSink, error)
+
+// sinkRegistry supports the generic NewMetricSink function by mapping URL
+// schemes to metric sink factory functions
+var sinkRegistry = map[string]sinkURLFactoryFunc{
+	"statsd":   NewStatsdSinkFromURL,
+	"statsite": NewStatsiteSinkFromURL,
+	"inmem":    NewInmemSinkFromURL,
+}
+
+// NewMetricSinkFromURL allows a generic URL input to configure any of the
+// supported sinks. The scheme of the URL identifies the type of the sink,
+// and the query parameters are used to set options.
+//
+// "statsd://" - Initializes a StatsdSink. The host and port are passed through
+// as the "addr" of the sink
+//
+// "statsite://" - Initializes a StatsiteSink. The host and port become the
+// "addr" of the sink
+//
+// "inmem://" - Initializes an InmemSink. The host and port are ignored. The
+// "interval" and "retain" query parameters must be specified with valid
+// durations, see NewInmemSink for details.
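A minimal usage sketch of this factory (the address and durations are illustrative; `interval` and `retain` map to the NewInmemSink arguments, and the `log` import is assumed):

```go
sink, err := metrics.NewMetricSinkFromURL("inmem://localhost?interval=10s&retain=1m")
if err != nil {
	log.Fatal(err)
}
metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink)
```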
+func NewMetricSinkFromURL(urlStr string) (MetricSink, error) { + u, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + + sinkURLFactoryFunc := sinkRegistry[u.Scheme] + if sinkURLFactoryFunc == nil { + return nil, fmt.Errorf( + "cannot create metric sink, unrecognized sink name: %q", u.Scheme) + } + + return sinkURLFactoryFunc(u) +} diff --git a/vendor/github.com/armon/go-metrics/start.go b/vendor/github.com/armon/go-metrics/start.go new file mode 100644 index 00000000000..38976f8dc93 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/start.go @@ -0,0 +1,158 @@ +package metrics + +import ( + "os" + "sync" + "sync/atomic" + "time" + + iradix "github.com/hashicorp/go-immutable-radix" +) + +// Config is used to configure metrics settings +type Config struct { + ServiceName string // Prefixed with keys to separate services + HostName string // Hostname to use. If not provided and EnableHostname, it will be os.Hostname + EnableHostname bool // Enable prefixing gauge values with hostname + EnableHostnameLabel bool // Enable adding hostname to labels + EnableServiceLabel bool // Enable adding service to labels + EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory) + EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer") + TimerGranularity time.Duration // Granularity of timers. + ProfileInterval time.Duration // Interval to profile runtime metrics + + AllowedPrefixes []string // A list of metric prefixes to allow, with '.' as the separator + BlockedPrefixes []string // A list of metric prefixes to block, with '.' as the separator + AllowedLabels []string // A list of metric labels to allow, with '.' as the separator + BlockedLabels []string // A list of metric labels to block, with '.' as the separator + FilterDefault bool // Whether to allow metrics by default +} + +// Metrics represents an instance of a metrics sink that can +// be used to emit +type Metrics struct { + Config + lastNumGC uint32 + sink MetricSink + filter *iradix.Tree + allowedLabels map[string]bool + blockedLabels map[string]bool + filterLock sync.RWMutex // Lock filters and allowedLabels/blockedLabels access +} + +// Shared global metrics instance +var globalMetrics atomic.Value // *Metrics + +func init() { + // Initialize to a blackhole sink to avoid errors + globalMetrics.Store(&Metrics{sink: &BlackholeSink{}}) +} + +// Default returns the shared global metrics instance. 
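+//
+// Illustrative wiring (a sketch, not upstream text): install the shared
+// instance once at startup via NewGlobal, then reach it through Default or
+// the package-level helpers:
+//
+//	sink := NewInmemSink(10*time.Second, time.Minute)
+//	_, _ = NewGlobal(DefaultConfig("my-service"), sink)
+//	Default().IncrCounter([]string{"requests"}, 1)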
+func Default() *Metrics { + return globalMetrics.Load().(*Metrics) +} + +// DefaultConfig provides a sane default configuration +func DefaultConfig(serviceName string) *Config { + c := &Config{ + ServiceName: serviceName, // Use client provided service + HostName: "", + EnableHostname: true, // Enable hostname prefix + EnableRuntimeMetrics: true, // Enable runtime profiling + EnableTypePrefix: false, // Disable type prefix + TimerGranularity: time.Millisecond, // Timers are in milliseconds + ProfileInterval: time.Second, // Poll runtime every second + FilterDefault: true, // Don't filter metrics by default + } + + // Try to get the hostname + name, _ := os.Hostname() + c.HostName = name + return c +} + +// New is used to create a new instance of Metrics +func New(conf *Config, sink MetricSink) (*Metrics, error) { + met := &Metrics{} + met.Config = *conf + met.sink = sink + met.UpdateFilterAndLabels(conf.AllowedPrefixes, conf.BlockedPrefixes, conf.AllowedLabels, conf.BlockedLabels) + + // Start the runtime collector + if conf.EnableRuntimeMetrics { + go met.collectStats() + } + return met, nil +} + +// NewGlobal is the same as New, but it assigns the metrics object to be +// used globally as well as returning it. +func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) { + metrics, err := New(conf, sink) + if err == nil { + globalMetrics.Store(metrics) + } + return metrics, err +} + +// Proxy all the methods to the globalMetrics instance +func SetGauge(key []string, val float32) { + globalMetrics.Load().(*Metrics).SetGauge(key, val) +} + +func SetGaugeWithLabels(key []string, val float32, labels []Label) { + globalMetrics.Load().(*Metrics).SetGaugeWithLabels(key, val, labels) +} + +func EmitKey(key []string, val float32) { + globalMetrics.Load().(*Metrics).EmitKey(key, val) +} + +func IncrCounter(key []string, val float32) { + globalMetrics.Load().(*Metrics).IncrCounter(key, val) +} + +func IncrCounterWithLabels(key []string, val float32, labels []Label) { + globalMetrics.Load().(*Metrics).IncrCounterWithLabels(key, val, labels) +} + +func AddSample(key []string, val float32) { + globalMetrics.Load().(*Metrics).AddSample(key, val) +} + +func AddSampleWithLabels(key []string, val float32, labels []Label) { + globalMetrics.Load().(*Metrics).AddSampleWithLabels(key, val, labels) +} + +func MeasureSince(key []string, start time.Time) { + globalMetrics.Load().(*Metrics).MeasureSince(key, start) +} + +func MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { + globalMetrics.Load().(*Metrics).MeasureSinceWithLabels(key, start, labels) +} + +func UpdateFilter(allow, block []string) { + globalMetrics.Load().(*Metrics).UpdateFilter(allow, block) +} + +// UpdateFilterAndLabels set allow/block prefixes of metrics while allowedLabels +// and blockedLabels - when not nil - allow filtering of labels in order to +// block/allow globally labels (especially useful when having large number of +// values for a given label). See README.md for more information about usage. +func UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) { + globalMetrics.Load().(*Metrics).UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels) +} + +// Shutdown disables metric collection, then blocks while attempting to flush metrics to storage. +// WARNING: Not all MetricSink backends support this functionality, and calling this will cause them to leak resources. +// This is intended for use immediately prior to application exit. 
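+//
+// Illustrative call order at process exit (a sketch, assuming the installed
+// sink implements ShutdownSink):
+//
+//	IncrCounter([]string{"requests"}, 1) // normal operation
+//	Shutdown()                           // block while flushing, then blackhole
+//	IncrCounter([]string{"requests"}, 1) // silently discarded from here on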
+func Shutdown() { + m := globalMetrics.Load().(*Metrics) + // Swap whatever MetricSink is currently active with a BlackholeSink. Callers must not have a + // reason to expect that calls to the library will successfully collect metrics after Shutdown + // has been called. + globalMetrics.Store(&Metrics{sink: &BlackholeSink{}}) + m.Shutdown() +} diff --git a/vendor/github.com/armon/go-metrics/statsd.go b/vendor/github.com/armon/go-metrics/statsd.go new file mode 100644 index 00000000000..1bfffce46e2 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/statsd.go @@ -0,0 +1,184 @@ +package metrics + +import ( + "bytes" + "fmt" + "log" + "net" + "net/url" + "strings" + "time" +) + +const ( + // statsdMaxLen is the maximum size of a packet + // to send to statsd + statsdMaxLen = 1400 +) + +// StatsdSink provides a MetricSink that can be used +// with a statsite or statsd metrics server. It uses +// only UDP packets, while StatsiteSink uses TCP. +type StatsdSink struct { + addr string + metricQueue chan string +} + +// NewStatsdSinkFromURL creates an StatsdSink from a URL. It is used +// (and tested) from NewMetricSinkFromURL. +func NewStatsdSinkFromURL(u *url.URL) (MetricSink, error) { + return NewStatsdSink(u.Host) +} + +// NewStatsdSink is used to create a new StatsdSink +func NewStatsdSink(addr string) (*StatsdSink, error) { + s := &StatsdSink{ + addr: addr, + metricQueue: make(chan string, 4096), + } + go s.flushMetrics() + return s, nil +} + +// Close is used to stop flushing to statsd +func (s *StatsdSink) Shutdown() { + close(s.metricQueue) +} + +func (s *StatsdSink) SetGauge(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsdSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsdSink) EmitKey(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) +} + +func (s *StatsdSink) IncrCounter(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsdSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsdSink) AddSample(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +func (s *StatsdSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +// Flattens the key for formatting, removes spaces +func (s *StatsdSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Map(func(r rune) rune { + switch r { + case ':': + fallthrough + case ' ': + return '_' + default: + return r + } + }, joined) +} + +// Flattens the key along with labels for formatting, removes spaces +func (s *StatsdSink) flattenKeyLabels(parts []string, labels []Label) string { + for _, label := range labels { + parts = append(parts, label.Value) + } + return s.flattenKey(parts) +} + +// Does a non-blocking push to the metrics queue +func (s *StatsdSink) pushMetric(m string) { + select { + case s.metricQueue <- m: + default: + } +} + +// Flushes metrics +func (s *StatsdSink) flushMetrics() { + 
var sock net.Conn + var err error + var wait <-chan time.Time + ticker := time.NewTicker(flushInterval) + defer ticker.Stop() + +CONNECT: + // Create a buffer + buf := bytes.NewBuffer(nil) + + // Attempt to connect + sock, err = net.Dial("udp", s.addr) + if err != nil { + log.Printf("[ERR] Error connecting to statsd! Err: %s", err) + goto WAIT + } + + for { + select { + case metric, ok := <-s.metricQueue: + // Get a metric from the queue + if !ok { + goto QUIT + } + + // Check if this would overflow the packet size + if len(metric)+buf.Len() > statsdMaxLen { + _, err := sock.Write(buf.Bytes()) + buf.Reset() + if err != nil { + log.Printf("[ERR] Error writing to statsd! Err: %s", err) + goto WAIT + } + } + + // Append to the buffer + buf.WriteString(metric) + + case <-ticker.C: + if buf.Len() == 0 { + continue + } + + _, err := sock.Write(buf.Bytes()) + buf.Reset() + if err != nil { + log.Printf("[ERR] Error flushing to statsd! Err: %s", err) + goto WAIT + } + } + } + +WAIT: + // Wait for a while + wait = time.After(time.Duration(5) * time.Second) + for { + select { + // Dequeue the messages to avoid backlog + case _, ok := <-s.metricQueue: + if !ok { + goto QUIT + } + case <-wait: + goto CONNECT + } + } +QUIT: + s.metricQueue = nil +} diff --git a/vendor/github.com/armon/go-metrics/statsite.go b/vendor/github.com/armon/go-metrics/statsite.go new file mode 100644 index 00000000000..6c0d284d2dd --- /dev/null +++ b/vendor/github.com/armon/go-metrics/statsite.go @@ -0,0 +1,172 @@ +package metrics + +import ( + "bufio" + "fmt" + "log" + "net" + "net/url" + "strings" + "time" +) + +const ( + // We force flush the statsite metrics after this period of + // inactivity. Prevents stats from getting stuck in a buffer + // forever. + flushInterval = 100 * time.Millisecond +) + +// NewStatsiteSinkFromURL creates an StatsiteSink from a URL. It is used +// (and tested) from NewMetricSinkFromURL. 
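+//
+// For example (an illustrative sketch):
+//
+//	sink, err := NewMetricSinkFromURL("statsite://stats.internal:8125")
+//
+// reaches this constructor with u.Host == "stats.internal:8125". Like the
+// statsd sink, keys are flattened with '.' separators, with ':' and ' '
+// rewritten to '_', so an IncrCounter call is emitted on the wire as, e.g.:
+//
+//	api.web_server.requests:1.000000|c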
+func NewStatsiteSinkFromURL(u *url.URL) (MetricSink, error) { + return NewStatsiteSink(u.Host) +} + +// StatsiteSink provides a MetricSink that can be used with a +// statsite metrics server +type StatsiteSink struct { + addr string + metricQueue chan string +} + +// NewStatsiteSink is used to create a new StatsiteSink +func NewStatsiteSink(addr string) (*StatsiteSink, error) { + s := &StatsiteSink{ + addr: addr, + metricQueue: make(chan string, 4096), + } + go s.flushMetrics() + return s, nil +} + +// Close is used to stop flushing to statsite +func (s *StatsiteSink) Shutdown() { + close(s.metricQueue) +} + +func (s *StatsiteSink) SetGauge(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsiteSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsiteSink) EmitKey(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) +} + +func (s *StatsiteSink) IncrCounter(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsiteSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsiteSink) AddSample(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +func (s *StatsiteSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +// Flattens the key for formatting, removes spaces +func (s *StatsiteSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Map(func(r rune) rune { + switch r { + case ':': + fallthrough + case ' ': + return '_' + default: + return r + } + }, joined) +} + +// Flattens the key along with labels for formatting, removes spaces +func (s *StatsiteSink) flattenKeyLabels(parts []string, labels []Label) string { + for _, label := range labels { + parts = append(parts, label.Value) + } + return s.flattenKey(parts) +} + +// Does a non-blocking push to the metrics queue +func (s *StatsiteSink) pushMetric(m string) { + select { + case s.metricQueue <- m: + default: + } +} + +// Flushes metrics +func (s *StatsiteSink) flushMetrics() { + var sock net.Conn + var err error + var wait <-chan time.Time + var buffered *bufio.Writer + ticker := time.NewTicker(flushInterval) + defer ticker.Stop() + +CONNECT: + // Attempt to connect + sock, err = net.Dial("tcp", s.addr) + if err != nil { + log.Printf("[ERR] Error connecting to statsite! Err: %s", err) + goto WAIT + } + + // Create a buffered writer + buffered = bufio.NewWriter(sock) + + for { + select { + case metric, ok := <-s.metricQueue: + // Get a metric from the queue + if !ok { + goto QUIT + } + + // Try to send to statsite + _, err := buffered.Write([]byte(metric)) + if err != nil { + log.Printf("[ERR] Error writing to statsite! Err: %s", err) + goto WAIT + } + case <-ticker.C: + if err := buffered.Flush(); err != nil { + log.Printf("[ERR] Error flushing to statsite! 
Err: %s", err) + goto WAIT + } + } + } + +WAIT: + // Wait for a while + wait = time.After(time.Duration(5) * time.Second) + for { + select { + // Dequeue the messages to avoid backlog + case _, ok := <-s.metricQueue: + if !ok { + goto QUIT + } + case <-wait: + goto CONNECT + } + } +QUIT: + s.metricQueue = nil +} diff --git a/vendor/github.com/armon/go-radix/.gitignore b/vendor/github.com/armon/go-radix/.gitignore new file mode 100644 index 00000000000..00268614f04 --- /dev/null +++ b/vendor/github.com/armon/go-radix/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/armon/go-radix/.travis.yml b/vendor/github.com/armon/go-radix/.travis.yml new file mode 100644 index 00000000000..1a0bbea6c77 --- /dev/null +++ b/vendor/github.com/armon/go-radix/.travis.yml @@ -0,0 +1,3 @@ +language: go +go: + - tip diff --git a/vendor/github.com/armon/go-radix/LICENSE b/vendor/github.com/armon/go-radix/LICENSE new file mode 100644 index 00000000000..a5df10e675d --- /dev/null +++ b/vendor/github.com/armon/go-radix/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/go-radix/README.md b/vendor/github.com/armon/go-radix/README.md new file mode 100644 index 00000000000..26f42a2837c --- /dev/null +++ b/vendor/github.com/armon/go-radix/README.md @@ -0,0 +1,38 @@ +go-radix [![Build Status](https://travis-ci.org/armon/go-radix.png)](https://travis-ci.org/armon/go-radix) +========= + +Provides the `radix` package that implements a [radix tree](http://en.wikipedia.org/wiki/Radix_tree). +The package only provides a single `Tree` implementation, optimized for sparse nodes. + +As a radix tree, it provides the following: + * O(k) operations. In many cases, this can be faster than a hash table since + the hash function is an O(k) operation, and hash tables have very poor cache locality. + * Minimum / Maximum value lookups + * Ordered iteration + +For an immutable variant, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix). 
+ +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/armon/go-radix). + +Example +======= + +Below is a simple example of usage + +```go +// Create a tree +r := radix.New() +r.Insert("foo", 1) +r.Insert("bar", 2) +r.Insert("foobar", 2) + +// Find the longest prefix match +m, _, _ := r.LongestPrefix("foozip") +if m != "foo" { + panic("should be foo") +} +``` + diff --git a/vendor/github.com/armon/go-radix/radix.go b/vendor/github.com/armon/go-radix/radix.go new file mode 100644 index 00000000000..e2bb22eb91d --- /dev/null +++ b/vendor/github.com/armon/go-radix/radix.go @@ -0,0 +1,540 @@ +package radix + +import ( + "sort" + "strings" +) + +// WalkFn is used when walking the tree. Takes a +// key and value, returning if iteration should +// be terminated. +type WalkFn func(s string, v interface{}) bool + +// leafNode is used to represent a value +type leafNode struct { + key string + val interface{} +} + +// edge is used to represent an edge node +type edge struct { + label byte + node *node +} + +type node struct { + // leaf is used to store possible leaf + leaf *leafNode + + // prefix is the common prefix we ignore + prefix string + + // Edges should be stored in-order for iteration. + // We avoid a fully materialized slice to save memory, + // since in most cases we expect to be sparse + edges edges +} + +func (n *node) isLeaf() bool { + return n.leaf != nil +} + +func (n *node) addEdge(e edge) { + n.edges = append(n.edges, e) + n.edges.Sort() +} + +func (n *node) updateEdge(label byte, node *node) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + n.edges[idx].node = node + return + } + panic("replacing missing edge") +} + +func (n *node) getEdge(label byte) *node { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + return n.edges[idx].node + } + return nil +} + +func (n *node) delEdge(label byte) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + copy(n.edges[idx:], n.edges[idx+1:]) + n.edges[len(n.edges)-1] = edge{} + n.edges = n.edges[:len(n.edges)-1] + } +} + +type edges []edge + +func (e edges) Len() int { + return len(e) +} + +func (e edges) Less(i, j int) bool { + return e[i].label < e[j].label +} + +func (e edges) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} + +func (e edges) Sort() { + sort.Sort(e) +} + +// Tree implements a radix tree. This can be treated as a +// Dictionary abstract data type. 
The main advantage over
+// a standard hash map is prefix-based lookups and
+// ordered iteration.
+type Tree struct {
+	root *node
+	size int
+}
+
+// New returns an empty Tree
+func New() *Tree {
+	return NewFromMap(nil)
+}
+
+// NewFromMap returns a new tree containing the keys
+// from an existing map
+func NewFromMap(m map[string]interface{}) *Tree {
+	t := &Tree{root: &node{}}
+	for k, v := range m {
+		t.Insert(k, v)
+	}
+	return t
+}
+
+// Len is used to return the number of elements in the tree
+func (t *Tree) Len() int {
+	return t.size
+}
+
+// longestPrefix finds the length of the shared prefix
+// of two strings
+func longestPrefix(k1, k2 string) int {
+	max := len(k1)
+	if l := len(k2); l < max {
+		max = l
+	}
+	var i int
+	for i = 0; i < max; i++ {
+		if k1[i] != k2[i] {
+			break
+		}
+	}
+	return i
+}
+
+// Insert is used to add a new entry or update
+// an existing entry. Returns the previous value and if it was updated.
+func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) {
+	var parent *node
+	n := t.root
+	search := s
+	for {
+		// Handle key exhaustion
+		if len(search) == 0 {
+			if n.isLeaf() {
+				old := n.leaf.val
+				n.leaf.val = v
+				return old, true
+			}
+
+			n.leaf = &leafNode{
+				key: s,
+				val: v,
+			}
+			t.size++
+			return nil, false
+		}
+
+		// Look for the edge
+		parent = n
+		n = n.getEdge(search[0])
+
+		// No edge, create one
+		if n == nil {
+			e := edge{
+				label: search[0],
+				node: &node{
+					leaf: &leafNode{
+						key: s,
+						val: v,
+					},
+					prefix: search,
+				},
+			}
+			parent.addEdge(e)
+			t.size++
+			return nil, false
+		}
+
+		// Determine longest prefix of the search key on match
+		commonPrefix := longestPrefix(search, n.prefix)
+		if commonPrefix == len(n.prefix) {
+			search = search[commonPrefix:]
+			continue
+		}
+
+		// Split the node
+		t.size++
+		child := &node{
+			prefix: search[:commonPrefix],
+		}
+		parent.updateEdge(search[0], child)
+
+		// Restore the existing node
+		child.addEdge(edge{
+			label: n.prefix[commonPrefix],
+			node:  n,
+		})
+		n.prefix = n.prefix[commonPrefix:]
+
+		// Create a new leaf node
+		leaf := &leafNode{
+			key: s,
+			val: v,
+		}
+
+		// If the new key is a subset, add to this node
+		search = search[commonPrefix:]
+		if len(search) == 0 {
+			child.leaf = leaf
+			return nil, false
+		}
+
+		// Create a new edge for the node
+		child.addEdge(edge{
+			label: search[0],
+			node: &node{
+				leaf:   leaf,
+				prefix: search,
+			},
+		})
+		return nil, false
+	}
+}
+
+// Delete is used to delete a key, returning the previous
+// value and if it was deleted
+func (t *Tree) Delete(s string) (interface{}, bool) {
+	var parent *node
+	var label byte
+	n := t.root
+	search := s
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			if !n.isLeaf() {
+				break
+			}
+			goto DELETE
+		}
+
+		// Look for an edge
+		parent = n
+		label = search[0]
+		n = n.getEdge(label)
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	return nil, false
+
+DELETE:
+	// Delete the leaf
+	leaf := n.leaf
+	n.leaf = nil
+	t.size--
+
+	// Check if we should delete this node from the parent
+	if parent != nil && len(n.edges) == 0 {
+		parent.delEdge(label)
+	}
+
+	// Check if we should merge this node
+	if n != t.root && len(n.edges) == 1 {
+		n.mergeChild()
+	}
+
+	// Check if we should merge the parent's other child
+	if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() {
+		parent.mergeChild()
+	}
+
+	return leaf.val, true
+}
+
+// DeletePrefix is used to delete the subtree under a prefix.
+// Returns how many nodes were deleted.
+// Use this to delete large subtrees efficiently.
+func (t *Tree) DeletePrefix(s string) int {
+	return t.deletePrefix(nil, t.root, s)
+}
+
+// deletePrefix does a recursive deletion
+func (t *Tree) deletePrefix(parent, n *node, prefix string) int {
+	// Check for key exhaustion
+	if len(prefix) == 0 {
+		// Remove the leaf node
+		subTreeSize := 0
+		// recursively walk from all edges of the node to be deleted
+		recursiveWalk(n, func(s string, v interface{}) bool {
+			subTreeSize++
+			return false
+		})
+		if n.isLeaf() {
+			n.leaf = nil
+		}
+		n.edges = nil // deletes the entire subtree
+
+		// Check if we should merge the parent's other child
+		if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() {
+			parent.mergeChild()
+		}
+		t.size -= subTreeSize
+		return subTreeSize
+	}
+
+	// Look for an edge
+	label := prefix[0]
+	child := n.getEdge(label)
+	if child == nil || (!strings.HasPrefix(child.prefix, prefix) && !strings.HasPrefix(prefix, child.prefix)) {
+		return 0
+	}
+
+	// Consume the search prefix
+	if len(child.prefix) > len(prefix) {
+		prefix = prefix[len(prefix):]
+	} else {
+		prefix = prefix[len(child.prefix):]
+	}
+	return t.deletePrefix(n, child, prefix)
+}
+
+func (n *node) mergeChild() {
+	e := n.edges[0]
+	child := e.node
+	n.prefix = n.prefix + child.prefix
+	n.leaf = child.leaf
+	n.edges = child.edges
+}
+
+// Get is used to lookup a specific key, returning
+// the value and if it was found
+func (t *Tree) Get(s string) (interface{}, bool) {
+	n := t.root
+	search := s
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			if n.isLeaf() {
+				return n.leaf.val, true
+			}
+			break
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	return nil, false
+}
+
+// LongestPrefix is like Get, but instead of an
+// exact match, it will return the longest prefix match.
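+//
+// For example (an illustrative sketch):
+//
+//	r := New()
+//	r.Insert("foo", 1)
+//	key, val, ok := r.LongestPrefix("foozip") // "foo", 1, true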
+func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) {
+	var last *leafNode
+	n := t.root
+	search := s
+	for {
+		// Look for a leaf node
+		if n.isLeaf() {
+			last = n.leaf
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			break
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	if last != nil {
+		return last.key, last.val, true
+	}
+	return "", nil, false
+}
+
+// Minimum is used to return the minimum value in the tree
+func (t *Tree) Minimum() (string, interface{}, bool) {
+	n := t.root
+	for {
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		}
+		if len(n.edges) > 0 {
+			n = n.edges[0].node
+		} else {
+			break
+		}
+	}
+	return "", nil, false
+}
+
+// Maximum is used to return the maximum value in the tree
+func (t *Tree) Maximum() (string, interface{}, bool) {
+	n := t.root
+	for {
+		if num := len(n.edges); num > 0 {
+			n = n.edges[num-1].node
+			continue
+		}
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		}
+		break
+	}
+	return "", nil, false
+}
+
+// Walk is used to walk the tree
+func (t *Tree) Walk(fn WalkFn) {
+	recursiveWalk(t.root, fn)
+}
+
+// WalkPrefix is used to walk the tree under a prefix
+func (t *Tree) WalkPrefix(prefix string, fn WalkFn) {
+	n := t.root
+	search := prefix
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			recursiveWalk(n, fn)
+			return
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else if strings.HasPrefix(n.prefix, search) {
+			// Child may be under our search prefix
+			recursiveWalk(n, fn)
+			return
+		} else {
+			break
+		}
+	}
+}
+
+// WalkPath is used to walk the tree, but only visiting nodes
+// from the root down to a given leaf. Where WalkPrefix walks
+// all the entries *under* the given prefix, this walks the
+// entries *above* the given prefix.
+func (t *Tree) WalkPath(path string, fn WalkFn) {
+	n := t.root
+	search := path
+	for {
+		// Visit the leaf values if any
+		if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+			return
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			return
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			return
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+}
+
+// recursiveWalk is used to do a pre-order walk of a node
+// recursively.
Returns true if the walk should be aborted +func recursiveWalk(n *node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children + for _, e := range n.edges { + if recursiveWalk(e.node, fn) { + return true + } + } + return false +} + +// ToMap is used to walk the tree and convert it into a map +func (t *Tree) ToMap() map[string]interface{} { + out := make(map[string]interface{}, t.size) + t.Walk(func(k string, v interface{}) bool { + out[k] = v + return false + }) + return out +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md index 56b641cf7b1..fcf2947ba58 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md @@ -1,3 +1,571 @@ +# Release (2022-12-02) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.17.0](service/appsync/CHANGELOG.md#v1170-2022-12-02) + * **Feature**: Fixes the URI for the evaluatecode endpoint to include the /v1 prefix (ie. "/v1/dataplane-evaluatecode"). +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.20.1](service/ecs/CHANGELOG.md#v1201-2022-12-02) + * **Documentation**: Documentation updates for Amazon ECS +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.21.0](service/fms/CHANGELOG.md#v1210-2022-12-02) + * **Feature**: AWS Firewall Manager now supports Fortigate Cloud Native Firewall as a Service as a third-party policy type. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.28.0](service/mediaconvert/CHANGELOG.md#v1280-2022-12-02) + * **Feature**: The AWS Elemental MediaConvert SDK has added support for configurable ID3 eMSG box attributes and the ability to signal them with InbandEventStream tags in DASH and CMAF outputs. +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.25.0](service/medialive/CHANGELOG.md#v1250-2022-12-02) + * **Feature**: Updates to Event Signaling and Management (ESAM) API and documentation. +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.21.0](service/polly/CHANGELOG.md#v1210-2022-12-02) + * **Feature**: Add language code for Finnish (fi-FI) +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.18.0](service/proton/CHANGELOG.md#v1180-2022-12-02) + * **Feature**: CreateEnvironmentAccountConnection RoleArn input is now optional +* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.3.0](service/redshiftserverless/CHANGELOG.md#v130-2022-12-02) + * **Feature**: Add Table Level Restore operations for Amazon Redshift Serverless. Add multi-port support for Amazon Redshift Serverless endpoints. Add Tagging support to Snapshots and Recovery Points in Amazon Redshift Serverless. 
+* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.18.7](service/sns/CHANGELOG.md#v1187-2022-12-02) + * **Documentation**: This release adds the message payload-filtering feature to the SNS Subscribe, SetSubscriptionAttributes, and GetSubscriptionAttributes API actions + +# Release (2022-12-01) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.0.0](service/codecatalyst/CHANGELOG.md#v100-2022-12-01) + * **Release**: New AWS service client module + * **Feature**: This release adds operations that support customers using the AWS Toolkits and Amazon CodeCatalyst, a unified software development service that helps developers develop, deploy, and maintain applications in the cloud. For more information, see the documentation. +* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.20.0](service/comprehend/CHANGELOG.md#v1200-2022-12-01) + * **Feature**: Comprehend now supports semi-structured documents (such as PDF files or image files) as inputs for custom analysis using the synchronous APIs (ClassifyDocument and DetectEntities). +* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.16.0](service/gamelift/CHANGELOG.md#v1160-2022-12-01) + * **Feature**: GameLift introduces a new feature, GameLift Anywhere. GameLift Anywhere allows you to integrate your own compute resources with GameLift. You can also use GameLift Anywhere to iteratively test your game servers without uploading the build to GameLift for every iteration. +* `github.com/aws/aws-sdk-go-v2/service/pipes`: [v1.0.0](service/pipes/CHANGELOG.md#v100-2022-12-01) + * **Release**: New AWS service client module + * **Feature**: AWS introduces new Amazon EventBridge Pipes which allow you to connect sources (SQS, Kinesis, DDB, Kafka, MQ) to Targets (14+ EventBridge Targets) without any code, with filtering, batching, input transformation, and an optional Enrichment stage (Lambda, StepFunctions, ApiGateway, ApiDestinations) +* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.16.0](service/sfn/CHANGELOG.md#v1160-2022-12-01) + * **Feature**: This release adds support for the AWS Step Functions Map state in Distributed mode. The changes include a new MapRun resource and several new and modified APIs. + +# Release (2022-11-30) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.18.0](service/accessanalyzer/CHANGELOG.md#v1180-2022-11-30) + * **Feature**: This release adds support for S3 cross account access points. IAM Access Analyzer will now produce public or cross account findings when it detects bucket delegation to external account access points. +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.20.0](service/athena/CHANGELOG.md#v1200-2022-11-30) + * **Feature**: This release includes support for using Apache Spark in Amazon Athena. +* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.17.0](service/dataexchange/CHANGELOG.md#v1170-2022-11-30) + * **Feature**: This release enables data providers to license direct access to data in their Amazon S3 buckets or AWS Lake Formation data lakes through AWS Data Exchange. Subscribers get read-only access to the data and can use it in downstream AWS services, like Amazon Athena, without creating or managing copies. +* `github.com/aws/aws-sdk-go-v2/service/docdbelastic`: [v1.0.0](service/docdbelastic/CHANGELOG.md#v100-2022-11-30) + * **Release**: New AWS service client module + * **Feature**: Launched Amazon DocumentDB Elastic Clusters. 
You can now use the SDK to create, list, update and delete Amazon DocumentDB Elastic Cluster resources +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.37.0](service/glue/CHANGELOG.md#v1370-2022-11-30) + * **Feature**: This release adds support for AWS Glue Data Quality, which helps you evaluate and monitor the quality of your data and includes the API for creating, deleting, or updating data quality rulesets, runs and evaluations. +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.28.0](service/s3control/CHANGELOG.md#v1280-2022-11-30) + * **Feature**: Amazon S3 now supports cross-account access points. S3 bucket owners can now allow trusted AWS accounts to create access points associated with their bucket. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.56.0](service/sagemaker/CHANGELOG.md#v1560-2022-11-30) + * **Feature**: Added Models as part of the Search API. Added Model shadow deployments in realtime inference, and shadow testing in managed inference. Added support for shared spaces, geospatial APIs, Model Cards, AutoMLJobStep in pipelines, Git repositories on user profiles and domains, Model sharing in Jumpstart. +* `github.com/aws/aws-sdk-go-v2/service/sagemakergeospatial`: [v1.0.0](service/sagemakergeospatial/CHANGELOG.md#v100-2022-11-30) + * **Release**: New AWS service client module + * **Feature**: This release provides Amazon SageMaker geospatial APIs to build, train, deploy and visualize geospatial models. + +# Release (2022-11-29.2) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.74.0](service/ec2/CHANGELOG.md#v1740-2022-11-292) + * **Feature**: This release adds support for AWS Verified Access and the Hpc6id Amazon EC2 compute optimized instance type, which features 3rd generation Intel Xeon Scalable processors. +* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.15.0](service/firehose/CHANGELOG.md#v1150-2022-11-292) + * **Feature**: Allow support for the Serverless offering for Amazon OpenSearch Service as a Kinesis Data Firehose delivery destination. +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.19.0](service/kms/CHANGELOG.md#v1190-2022-11-292) + * **Feature**: AWS KMS introduces the External Key Store (XKS), a new feature for customers who want to protect their data with encryption keys stored in an external key management system under their control. +* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.0.0](service/omics/CHANGELOG.md#v100-2022-11-292) + * **Release**: New AWS service client module + * **Feature**: Amazon Omics is a new, purpose-built service that can be used by healthcare and life science organizations to store, query, and analyze omics data. The insights from that data can be used to accelerate scientific discoveries and improve healthcare. +* `github.com/aws/aws-sdk-go-v2/service/opensearchserverless`: [v1.0.0](service/opensearchserverless/CHANGELOG.md#v100-2022-11-292) + * **Release**: New AWS service client module + * **Feature**: Publish SDK for Amazon OpenSearch Serverless +* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.0.0](service/securitylake/CHANGELOG.md#v100-2022-11-292) + * **Release**: New AWS service client module + * **Feature**: Amazon Security Lake automatically centralizes security data from cloud, on-premises, and custom sources into a purpose-built data lake stored in your account. 
Security Lake makes it easier to analyze security data, so you can improve the protection of your workloads, applications, and data +* `github.com/aws/aws-sdk-go-v2/service/simspaceweaver`: [v1.0.0](service/simspaceweaver/CHANGELOG.md#v100-2022-11-292) + * **Release**: New AWS service client module + * **Feature**: AWS SimSpace Weaver is a new service that helps customers build spatial simulations at new levels of scale - resulting in virtual worlds with millions of dynamic entities. See the AWS SimSpace Weaver developer guide for more details on how to get started. https://docs.aws.amazon.com/simspaceweaver + +# Release (2022-11-29) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/arczonalshift`: [v1.0.0](service/arczonalshift/CHANGELOG.md#v100-2022-11-29) + * **Release**: New AWS service client module + * **Feature**: Amazon Route 53 Application Recovery Controller Zonal Shift is a new service that makes it easy to shift traffic away from an Availability Zone in a Region. See the developer guide for more information: https://docs.aws.amazon.com/r53recovery/latest/dg/what-is-route53-recovery.html +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.18.0](service/computeoptimizer/CHANGELOG.md#v1180-2022-11-29) + * **Feature**: Adds support for a new recommendation preference that makes it possible for customers to optimize their EC2 recommendations by utilizing an external metrics ingestion service to provide metrics. +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.28.0](service/configservice/CHANGELOG.md#v1280-2022-11-29) + * **Feature**: With this release, you can use AWS Config to evaluate your resources for compliance with Config rules before they are created or updated. Using Config rules in proactive mode enables you to test and build compliant resource templates or check resource configurations at the time they are provisioned. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.73.0](service/ec2/CHANGELOG.md#v1730-2022-11-29) + * **Feature**: Introduces ENA Express, which uses AWS SRD and dynamic routing to increase throughput and minimize latency, adds support for trust relationships between Reachability Analyzer and AWS Organizations to enable cross-account analysis, and adds support for Infrastructure Performance metric subscriptions. +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.24.0](service/eks/CHANGELOG.md#v1240-2022-11-29) + * **Feature**: Adds support for additional EKS add-ons metadata and filtering fields +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.26.0](service/fsx/CHANGELOG.md#v1260-2022-11-29) + * **Feature**: This release adds support for 4GB/s / 160K PIOPS FSx for ONTAP file systems and 10GB/s / 350K PIOPS FSx for OpenZFS file systems (Single_AZ_2). For FSx for ONTAP, this also adds support for DP volumes, snapshot policy, copy tags to backups, and Multi-AZ route table updates. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.36.0](service/glue/CHANGELOG.md#v1360-2022-11-29) + * **Feature**: This release allows the creation of Custom Visual Transforms (Dynamic Transforms) to be created via AWS Glue CLI/SDK. +* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.9.0](service/inspector2/CHANGELOG.md#v190-2022-11-29) + * **Feature**: This release adds support for Inspector to scan AWS Lambda. 
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.26.0](service/lambda/CHANGELOG.md#v1260-2022-11-29) + * **Feature**: Adds support for Lambda SnapStart, which helps improve the startup performance of functions. Customers can now manage SnapStart based functions via CreateFunction and UpdateFunctionConfiguration APIs +* `github.com/aws/aws-sdk-go-v2/service/licensemanagerusersubscriptions`: [v1.1.0](service/licensemanagerusersubscriptions/CHANGELOG.md#v110-2022-11-29) + * **Feature**: AWS now offers fully-compliant, Amazon-provided licenses for Microsoft Office Professional Plus 2021 Amazon Machine Images (AMIs) on Amazon EC2. These AMIs are now available on the Amazon EC2 console and on AWS Marketplace to launch instances on-demand without any long-term licensing commitments. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.24.0](service/macie2/CHANGELOG.md#v1240-2022-11-29) + * **Feature**: Added support for configuring Macie to continually sample objects from S3 buckets and inspect them for sensitive data. Results appear in statistics, findings, and other data that Macie provides. +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.28.0](service/quicksight/CHANGELOG.md#v1280-2022-11-29) + * **Feature**: This release adds new Describe APIs and updates Create and Update APIs to support the data model for Dashboards, Analyses, and Templates. +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.27.0](service/s3control/CHANGELOG.md#v1270-2022-11-29) + * **Feature**: Added two new APIs to support Amazon S3 Multi-Region Access Point failover controls: GetMultiRegionAccessPointRoutes and SubmitMultiRegionAccessPointRoutes. The failover control APIs are supported in the following Regions: us-east-1, us-west-2, eu-west-1, ap-southeast-2, and ap-northeast-1. +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.25.0](service/securityhub/CHANGELOG.md#v1250-2022-11-29) + * **Feature**: Adding StandardsManagedBy field to DescribeStandards API response + +# Release (2022-11-28) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.18.0](service/backup/CHANGELOG.md#v1180-2022-11-28) + * **Feature**: AWS Backup introduces support for legal hold and application stack backups. AWS Backup Audit Manager introduces support for cross-Region, cross-account reports. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.22.0](service/cloudwatch/CHANGELOG.md#v1220-2022-11-28) + * **Feature**: Adds cross-account support to the GetMetricData API. Adds cross-account support to the ListMetrics API through the usage of the IncludeLinkedAccounts flag and the new OwningAccounts field. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.17.0](service/cloudwatchlogs/CHANGELOG.md#v1170-2022-11-28) + * **Feature**: Updates to support CloudWatch Logs data protection and CloudWatch cross-account observability +* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.9.0](service/drs/CHANGELOG.md#v190-2022-11-28) + * **Feature**: Non breaking changes to existing APIs, and additional APIs added to support in-AWS failing back using AWS Elastic Disaster Recovery. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.20.0](service/ecs/CHANGELOG.md#v1200-2022-11-28) + * **Feature**: This release adds support for ECS Service Connect, a new capability that simplifies writing and operating resilient distributed applications. 
This release updates the TaskDefinition, Cluster, Service mutation APIs with Service connect constructs and also adds a new ListServicesByNamespace API. +* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.18.0](service/efs/CHANGELOG.md#v1180-2022-11-28) + * **Feature**: This release adds elastic as a new ThroughputMode value for EFS file systems and adds AFTER_1_DAY as a value for TransitionToIARules. +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.32.0](service/iot/CHANGELOG.md#v1320-2022-11-28) + * **Feature**: Job scheduling enables the scheduled rollout of a Job with start and end times and a customizable end behavior when end time is reached. This is available for continuous and snapshot jobs. Added support for MQTT5 properties to AWS IoT TopicRule Republish Action. +* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.13.0](service/iotdataplane/CHANGELOG.md#v1130-2022-11-28) + * **Feature**: This release adds support for MQTT5 properties to AWS IoT HTTP Publish API. +* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.23.0](service/iotwireless/CHANGELOG.md#v1230-2022-11-28) + * **Feature**: This release includes a new feature for customers to calculate the position of their devices by adding three new APIs: UpdateResourcePosition, GetResourcePosition, and GetPositionEstimate. +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.36.0](service/kendra/CHANGELOG.md#v1360-2022-11-28) + * **Feature**: Amazon Kendra now supports preview of table information from HTML tables in the search results. The most relevant cells with their corresponding rows, columns are displayed as a preview in the search result. The most relevant table cell or cells are also highlighted in table preview. +* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.16.0](service/mgn/CHANGELOG.md#v1160-2022-11-28) + * **Feature**: This release adds support for Application and Wave management. We also now support custom post-launch actions. +* `github.com/aws/aws-sdk-go-v2/service/oam`: [v1.0.0](service/oam/CHANGELOG.md#v100-2022-11-28) + * **Release**: New AWS service client module + * **Feature**: Amazon CloudWatch Observability Access Manager is a new service that allows configuration of the CloudWatch cross-account observability feature. +* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.17.0](service/organizations/CHANGELOG.md#v1170-2022-11-28) + * **Feature**: This release introduces delegated administrator for AWS Organizations, a new feature to help you delegate the management of your Organizations policies, enabling you to govern your AWS organization in a decentralized way. You can now allow member accounts to manage Organizations policies. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.31.0](service/rds/CHANGELOG.md#v1310-2022-11-28) + * **Feature**: This release enables new Aurora and RDS feature called Blue/Green Deployments that makes updates to databases safer, simpler and faster. +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.19.0](service/textract/CHANGELOG.md#v1190-2022-11-28) + * **Feature**: This release adds support for classifying and splitting lending documents by type, and extracting information by using the Analyze Lending APIs. This release also includes support for summarized information of the processed lending document package, in addition to per document results. 
+* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.22.0](service/transcribe/CHANGELOG.md#v1220-2022-11-28) + * **Feature**: This release adds support for 'inputType' for post-call and real-time (streaming) Call Analytics within Amazon Transcribe. +* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.8.0](service/transcribestreaming/CHANGELOG.md#v180-2022-11-28) + * **Feature**: This release adds support for real-time (streaming) and post-call Call Analytics within Amazon Transcribe. + +# Release (2022-11-23) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.10.0](service/grafana/CHANGELOG.md#v1100-2022-11-23) + * **Feature**: This release includes support for configuring a Grafana workspace to connect to a datasource within a VPC as well as new APIs for configuring Grafana settings. +* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.7.0](service/rbin/CHANGELOG.md#v170-2022-11-23) + * **Feature**: This release adds support for Rule Lock for Recycle Bin, which allows you to lock retention rules so that they can no longer be modified or deleted. + +# Release (2022-11-22) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.21.0](service/appflow/CHANGELOG.md#v1210-2022-11-22) + * **Feature**: Adding support for Amazon AppFlow to transfer the data to Amazon Redshift databases through Amazon Redshift Data API service. This feature will support the Redshift destination connector on both public and private accessible Amazon Redshift Clusters and Amazon Redshift Serverless. +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.15.0](service/kinesisanalyticsv2/CHANGELOG.md#v1150-2022-11-22) + * **Feature**: Support for Apache Flink 1.15 in Kinesis Data Analytics. + +# Release (2022-11-21) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.25.0](service/route53/CHANGELOG.md#v1250-2022-11-21) + * **Feature**: Amazon Route 53 now supports the Asia Pacific (Hyderabad) Region (ap-south-2) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. + +# Release (2022-11-18.2) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.0.1](service/ssmsap/CHANGELOG.md#v101-2022-11-182) + * **Bug Fix**: Removes old model file for ssm sap and uses the new model file to regenerate client + +# Release (2022-11-18) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.20.0](service/appflow/CHANGELOG.md#v1200-2022-11-18) + * **Feature**: AppFlow provides a new API called UpdateConnectorRegistration to update a custom connector that customers have previously registered. With this API, customers no longer need to unregister and then register a connector to make an update. +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.21.0](service/auditmanager/CHANGELOG.md#v1210-2022-11-18) + * **Feature**: This release introduces a new feature for Audit Manager: Evidence finder. You can now use evidence finder to quickly query your evidence, and add the matching evidence results to an assessment report. 
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.0.0](service/chimesdkvoice/CHANGELOG.md#v100-2022-11-18) + * **Release**: New AWS service client module + * **Feature**: Amazon Chime Voice Connector, Voice Connector Group and PSTN Audio Service APIs are now available in the Amazon Chime SDK Voice namespace. See https://docs.aws.amazon.com/chime-sdk/latest/dg/sdk-available-regions.html for more details. +* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.21.0](service/cloudfront/CHANGELOG.md#v1210-2022-11-18) + * **Feature**: CloudFront API support for staging distributions and associated traffic management policies. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.38.0](service/connect/CHANGELOG.md#v1380-2022-11-18) + * **Feature**: Added AllowedAccessControlTags and TagRestrictedResource for Tag Based Access Control on Amazon Connect Webpage +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.17.6](service/dynamodb/CHANGELOG.md#v1176-2022-11-18) + * **Documentation**: Updated minor fixes for DynamoDB documentation. +* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.13.25](service/dynamodbstreams/CHANGELOG.md#v11325-2022-11-18) + * **Documentation**: Updated minor fixes for DynamoDB documentation. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.72.0](service/ec2/CHANGELOG.md#v1720-2022-11-18) + * **Feature**: This release adds support for copying an Amazon Machine Image's tags when copying an AMI. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.35.0](service/glue/CHANGELOG.md#v1350-2022-11-18) + * **Feature**: AWSGlue Crawler - Adding support for Table and Column level Comments with database level datatypes for JDBC based crawler. +* `github.com/aws/aws-sdk-go-v2/service/iotroborunner`: [v1.0.0](service/iotroborunner/CHANGELOG.md#v100-2022-11-18) + * **Release**: New AWS service client module + * **Feature**: AWS IoT RoboRunner is a new service that makes it easy to build applications that help multi-vendor robots work together seamlessly. See the IoT RoboRunner developer guide for more details on getting started. https://docs.aws.amazon.com/iotroborunner/latest/dev/iotroborunner-welcome.html +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.27.0](service/quicksight/CHANGELOG.md#v1270-2022-11-18) + * **Feature**: This release adds the following: 1) Asset management for centralized assets governance 2) QuickSight Q now supports public embedding 3) New Termination protection flag to mitigate accidental deletes 4) Athena data sources now accept a custom IAM role 5) QuickSight supports connectivity to Databricks +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.55.0](service/sagemaker/CHANGELOG.md#v1550-2022-11-18) + * **Feature**: Added DisableProfiler flag as a new field in ProfilerConfig +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.15.0](service/servicecatalog/CHANGELOG.md#v1150-2022-11-18) + * **Feature**: This release 1. adds support for Principal Name Sharing with Service Catalog portfolio sharing. 2. Introduces repo sourced products which are created and managed with existing SC APIs. These products are synced to external repos and auto create new product versions based on changes in the repo. 
+* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.15.0](service/sfn/CHANGELOG.md#v1150-2022-11-18) + * **Feature**: This release adds support for using Step Functions service integrations to invoke any cross-account AWS resource, even if that service doesn't support resource-based policies or cross-account calls. See https://docs.aws.amazon.com/step-functions/latest/dg/concepts-access-cross-acct-resources.html +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.25.0](service/transfer/CHANGELOG.md#v1250-2022-11-18) + * **Feature**: Adds a NONE encryption algorithm type to AS2 connectors, providing support for skipping encryption of the AS2 message body when a HTTPS URL is also specified. + +# Release (2022-11-17) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.12.0](service/amplify/CHANGELOG.md#v1120-2022-11-17) + * **Feature**: Adds a new value (WEB_COMPUTE) to the Platform enum that allows customers to create Amplify Apps with Server-Side Rendering support. +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.19.0](service/appflow/CHANGELOG.md#v1190-2022-11-17) + * **Feature**: AppFlow simplifies the preparation and cataloging of SaaS data into the AWS Glue Data Catalog where your data can be discovered and accessed by AWS analytics and ML services. AppFlow now also supports data field partitioning and file size optimization to improve query performance and reduce cost. +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.16.0](service/appsync/CHANGELOG.md#v1160-2022-11-17) + * **Feature**: This release introduces the APPSYNC_JS runtime, and adds support for JavaScript in AppSync functions and AppSync pipeline resolvers. +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.22.0](service/databasemigrationservice/CHANGELOG.md#v1220-2022-11-17) + * **Feature**: Adds support for Internet Protocol Version 6 (IPv6) on DMS Replication Instances +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.71.0](service/ec2/CHANGELOG.md#v1710-2022-11-17) + * **Feature**: This release adds a new optional parameter "privateIpAddress" for the CreateNatGateway API. PrivateIPAddress will allow customers to select a custom Private IPv4 address instead of having it be auto-assigned. +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.18.25](service/elasticloadbalancingv2/CHANGELOG.md#v11825-2022-11-17) + * **Documentation**: Provides new target group attributes to turn on/off cross zone load balancing and configure target group health for Network Load Balancers and Application Load Balancers. Provides improvements to health check configuration for Network Load Balancers. +* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.4.0](service/emrserverless/CHANGELOG.md#v140-2022-11-17) + * **Feature**: Adds support for AWS Graviton2 based applications. You can now select CPU architecture when creating new applications or updating existing ones. +* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.1.0](service/ivschat/CHANGELOG.md#v110-2022-11-17) + * **Feature**: Adds LoggingConfiguration APIs for IVS Chat - a feature that allows customers to store and record sent messages in a chat room to S3 buckets, CloudWatch logs, or Kinesis firehose. 
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.25.0](service/lambda/CHANGELOG.md#v1250-2022-11-17) + * **Feature**: Add Node 18 (nodejs18.x) support to AWS Lambda. +* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.22.0](service/personalize/CHANGELOG.md#v1220-2022-11-17) + * **Feature**: This release provides support for creation and use of metric attributions in AWS Personalize +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.20.0](service/polly/CHANGELOG.md#v1200-2022-11-17) + * **Feature**: Add two new neural voices - Ola (pl-PL) and Hala (ar-AE). +* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.8.0](service/rum/CHANGELOG.md#v180-2022-11-17) + * **Feature**: CloudWatch RUM now supports custom events. To use custom events, create an app monitor or update an app monitor with CustomEvent Status as ENABLED. +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.26.0](service/s3control/CHANGELOG.md#v1260-2022-11-17) + * **Feature**: Added 34 new S3 Storage Lens metrics to support additional customer use cases. +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.16.7](service/secretsmanager/CHANGELOG.md#v1167-2022-11-17) + * **Documentation**: Documentation updates for Secrets Manager. +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.24.0](service/securityhub/CHANGELOG.md#v1240-2022-11-17) + * **Feature**: Added SourceLayerArn and SourceLayerHash fields for security findings. Updated AwsLambdaFunction resource details. +* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.15.0](service/servicecatalogappregistry/CHANGELOG.md#v1150-2022-11-17) + * **Feature**: This release adds support for tagged resource associations, which allows you to associate a group of resources with a defined resource tag key and value to the application. +* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.17.4](service/sts/CHANGELOG.md#v1174-2022-11-17) + * **Documentation**: Documentation updates for AWS Security Token Service. +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.18.0](service/textract/CHANGELOG.md#v1180-2022-11-17) + * **Feature**: This release adds support for specifying and extracting information from documents using the Signatures feature within the Analyze Document API +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.27.0](service/workspaces/CHANGELOG.md#v1270-2022-11-17) + * **Feature**: The release introduces CreateStandbyWorkspaces, an API that allows you to create standby WorkSpaces associated with a primary WorkSpace in another Region. DescribeWorkspaces now includes related WorkSpaces properties. DescribeWorkspaceBundles and CreateWorkspaceBundle now return more bundle details. + +# Release (2022-11-16) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.19.1](service/batch/CHANGELOG.md#v1191-2022-11-16) + * **Documentation**: Documentation updates related to Batch on EKS +* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.2.0](service/billingconductor/CHANGELOG.md#v120-2022-11-16) + * **Feature**: This release adds a new feature BillingEntity pricing rule.
+* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.24.0](service/cloudformation/CHANGELOG.md#v1240-2022-11-16) + * **Feature**: Added UnsupportedTarget HandlerErrorCode for use with CFN Resource Hooks +* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.14.0](service/comprehendmedical/CHANGELOG.md#v1140-2022-11-16) + * **Feature**: This release supports a new set of entities and traits. It also adds a new category (BEHAVIORAL_ENVIRONMENTAL_SOCIAL). +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.37.0](service/connect/CHANGELOG.md#v1370-2022-11-16) + * **Feature**: This release adds a new MonitorContact API for initiating monitoring of ongoing Voice and Chat contacts. +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.23.0](service/eks/CHANGELOG.md#v1230-2022-11-16) + * **Feature**: Adds support for customer-provided placement groups for Kubernetes control plane instances when creating local EKS clusters on Outposts +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.24.0](service/elasticache/CHANGELOG.md#v1240-2022-11-16) + * **Feature**: ElastiCache for Redis now supports AWS Identity and Access Management (IAM) authentication access to Redis clusters, starting with Redis engine version 7.0 +* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.8.0](service/iottwinmaker/CHANGELOG.md#v180-2022-11-16) + * **Feature**: This release adds the following: 1) ExecuteQuery API allows users to query their AWS IoT TwinMaker Knowledge Graph 2) Pricing plan APIs allow users to configure and manage their pricing mode 3) Support for property groups and tabular property values in existing AWS IoT TwinMaker APIs. +* `github.com/aws/aws-sdk-go-v2/service/personalizeevents`: [v1.12.0](service/personalizeevents/CHANGELOG.md#v1120-2022-11-16) + * **Feature**: This release provides support for creation and use of metric attributions in AWS Personalize +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.17.0](service/proton/CHANGELOG.md#v1170-2022-11-16) + * **Feature**: Add support for sorting and filtering in ListServiceInstances +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.30.0](service/rds/CHANGELOG.md#v1300-2022-11-16) + * **Feature**: This release adds support for container databases (CDBs) to Amazon RDS Custom for Oracle. A CDB contains one PDB at creation. You can add more PDBs using Oracle SQL. You can also customize your database installation by setting the Oracle base, Oracle home, and the OS user name and group. +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.33.0](service/ssm/CHANGELOG.md#v1330-2022-11-16) + * **Feature**: This release adds support for cross-account access in CreateOpsItem, UpdateOpsItem and GetOpsItem. It introduces new APIs to set up resource policies for SSM resources: PutResourcePolicy, GetResourcePolicies and DeleteResourcePolicy.
+* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.19.0](service/ssmincidents/CHANGELOG.md#v1190-2022-11-16) + * **Feature**: Add support for PagerDuty integrations on ResponsePlan, IncidentRecord, and RelatedItem APIs +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.24.0](service/transfer/CHANGELOG.md#v1240-2022-11-16) + * **Feature**: Allows additional operations to throw ThrottlingException +* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.15.0](service/xray/CHANGELOG.md#v1150-2022-11-16) + * **Feature**: This release adds new APIs - PutResourcePolicy, DeleteResourcePolicy, and ListResourcePolicies - for supporting resource-based policies for AWS X-Ray. + +# Release (2022-11-15) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.36.0](service/connect/CHANGELOG.md#v1360-2022-11-15) + * **Feature**: This release updates the APIs: UpdateInstanceAttribute, DescribeInstanceAttribute, and ListInstanceAttributes. You can use it to programmatically enable/disable enhanced contact monitoring using attribute type ENHANCED_CONTACT_MONITORING on the specified Amazon Connect instance. +* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.20.0](service/greengrassv2/CHANGELOG.md#v1200-2022-11-15) + * **Feature**: Adds new parent target ARN parameter to CreateDeployment, GetDeployment, and ListDeployments APIs for the new subdeployments feature. +* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.24.0](service/route53/CHANGELOG.md#v1240-2022-11-15) + * **Feature**: Amazon Route 53 now supports the Europe (Spain) Region (eu-south-2) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. +* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.0.0](service/ssmsap/CHANGELOG.md#v100-2022-11-15) + * **Release**: New AWS service client module + * **Feature**: AWS Systems Manager for SAP provides simplified operations and management of SAP applications such as SAP HANA. With this release, SAP customers and partners can automate and simplify their SAP system administration tasks such as backup/restore of SAP HANA. +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.26.0](service/workspaces/CHANGELOG.md#v1260-2022-11-15) + * **Feature**: This release introduces ModifyCertificateBasedAuthProperties, a new API that allows control of certificate-based auth properties associated with a WorkSpaces directory. The DescribeWorkspaceDirectories API will now additionally return certificate-based auth properties in its responses. + +# Release (2022-11-14) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.20.0](service/customerprofiles/CHANGELOG.md#v1200-2022-11-14) + * **Feature**: This release enhances the SearchProfiles API by providing functionality to search for profiles using multiple keys and logical operators. +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.18.0](service/lakeformation/CHANGELOG.md#v1180-2022-11-14) + * **Feature**: This release adds a new parameter "Parameters" in the DataLakeSettings. +* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.13.3](service/managedblockchain/CHANGELOG.md#v1133-2022-11-14) + * **Documentation**: Updates the API docs for the NetworkEthereumAttributes data type and the DeleteNode and CreateNode operations to also include the supported Goerli network.
+* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.16.0](service/proton/CHANGELOG.md#v1160-2022-11-14) + * **Feature**: Add support for CodeBuild Provisioning +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.29.0](service/rds/CHANGELOG.md#v1290-2022-11-14) + * **Feature**: This release adds support for restoring an RDS Multi-AZ DB cluster snapshot to a Single-AZ deployment or a Multi-AZ DB instance deployment. +* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.12.0](service/workdocs/CHANGELOG.md#v1120-2022-11-14) + * **Feature**: Added 2 new document related operations, DeleteDocumentVersion and RestoreDocumentVersions. +* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.14.0](service/xray/CHANGELOG.md#v1140-2022-11-14) + * **Feature**: This release enhances GetServiceGraph API to support new type of edge to represent links between SQS and Lambda in event-driven applications. + +# Release (2022-11-11) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/config`: [v1.18.0](config/CHANGELOG.md#v1180-2022-11-11) + * **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846 + * **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider; a usage sketch follows the 2022-11-10 release notes below +* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.13.0](credentials/CHANGELOG.md#v1130-2022-11-11) + * **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846 + * **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.34.1](service/glue/CHANGELOG.md#v1341-2022-11-11) + * **Documentation**: Added links related to enabling job bookmarks. +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.31.0](service/iot/CHANGELOG.md#v1310-2022-11-11) + * **Feature**: This release adds a new API, listRelatedResourcesForAuditFinding, and a new member type, IssuerCertificates, for IoT Device Defender audit. +* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.16.0](service/licensemanager/CHANGELOG.md#v1160-2022-11-11) + * **Feature**: AWS License Manager now supports onboarded Management Accounts or Delegated Admins to view granted licenses aggregated from all accounts in the organization. +* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.14.0](service/marketplacecatalog/CHANGELOG.md#v1140-2022-11-11) + * **Feature**: Added three new APIs to support tagging and tag-based authorization: TagResource, UntagResource, and ListTagsForResource. Added optional parameters to the StartChangeSet API to support tagging a resource while making a request to create it.
+* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.21.0](service/rekognition/CHANGELOG.md#v1210-2022-11-11) + * **Feature**: Adds support for the ImageProperties feature to detect dominant colors and image brightness, sharpness, and contrast; inclusion and exclusion filters for labels and label categories; and new fields in the API response, "aliases" and "categories" +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.23.8](service/securityhub/CHANGELOG.md#v1238-2022-11-11) + * **Documentation**: Documentation updates for Security Hub +* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.18.0](service/ssmincidents/CHANGELOG.md#v1180-2022-11-11) + * **Feature**: RelatedItems now have an ID field which can be used for referencing them elsewhere. Introduces event references in the TimelineEvent API and increases the maximum length of "eventData" to 12K characters. + +# Release (2022-11-10) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.24.1](service/autoscaling/CHANGELOG.md#v1241-2022-11-10) + * **Documentation**: This release adds a new price capacity optimized allocation strategy for Spot Instances to help customers optimize provisioning of Spot Instances via EC2 Auto Scaling, EC2 Fleet, and Spot Fleet. It allocates Spot Instances based on both spare capacity availability and Spot Instance price. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.70.0](service/ec2/CHANGELOG.md#v1700-2022-11-10) + * **Feature**: This release adds a new price capacity optimized allocation strategy for Spot Instances to help customers optimize provisioning of Spot Instances via EC2 Auto Scaling, EC2 Fleet, and Spot Fleet. It allocates Spot Instances based on both spare capacity availability and Spot Instance price. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.19.0](service/ecs/CHANGELOG.md#v1190-2022-11-10) + * **Feature**: This release adds support for task scale-in protection with updateTaskProtection and getTaskProtection APIs. UpdateTaskProtection API can be used to protect a service-managed task from being terminated by scale-in events and getTaskProtection API to get the scale-in protection status of a task. +* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.17.0](service/elasticsearchservice/CHANGELOG.md#v1170-2022-11-10) + * **Feature**: Amazon OpenSearch Service now offers managed VPC endpoints to connect to your Amazon OpenSearch Service VPC-enabled domain in a Virtual Private Cloud (VPC). This feature allows you to privately access your OpenSearch Service domain without using public IPs or requiring traffic to traverse the Internet. +* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.0.1](service/resourceexplorer2/CHANGELOG.md#v101-2022-11-10) + * **Documentation**: Text only updates to some Resource Explorer descriptions. +* `github.com/aws/aws-sdk-go-v2/service/scheduler`: [v1.0.0](service/scheduler/CHANGELOG.md#v100-2022-11-10) + * **Release**: New AWS service client module + * **Feature**: AWS introduces the new Amazon EventBridge Scheduler. EventBridge Scheduler is a serverless scheduler that allows you to create, run, and manage tasks from one central, managed service.
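The SSO changes called out under Release (2022-11-11) above surface through the ordinary shared-config loading path. What follows is a minimal, illustrative sketch (not part of the upstream changelog) of how a caller would exercise the refreshed SSOTokenProvider behavior; the profile name `my-sso-profile` is a hypothetical assumption, and import paths are written as they appear in this document's module listing.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	ctx := context.Background()

	// LoadDefaultConfig resolves credentials from the shared config files.
	// For an SSO profile this goes through SSOCredentialProvider, which (as
	// of config v1.18.0 / credentials v1.13.0 above) can refresh its access
	// token via SSOTokenProvider instead of failing once the cached token
	// expires.
	cfg, err := config.LoadDefaultConfig(ctx,
		config.WithSharedConfigProfile("my-sso-profile"), // hypothetical profile name
	)
	if err != nil {
		log.Fatalf("loading shared config: %v", err)
	}

	// Retrieve forces credential resolution, which is where the token
	// refresh path runs.
	creds, err := cfg.Credentials.Retrieve(ctx)
	if err != nil {
		log.Fatalf("retrieving credentials: %v", err)
	}
	log.Printf("credentials resolved via %s", creds.Source)
}
```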
+ +# Release (2022-11-09) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.35.0](service/connect/CHANGELOG.md#v1350-2022-11-09) + * **Feature**: This release adds new fields SignInUrl, UserArn, and UserId to GetFederationToken response payload. +* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.1.0](service/connectcases/CHANGELOG.md#v110-2022-11-09) + * **Feature**: This release adds the ability to disable templates through the UpdateTemplate API. Disabling templates prevents customers from creating cases using the template. For more information see https://docs.aws.amazon.com/cases/latest/APIReference/Welcome.html +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.69.0](service/ec2/CHANGELOG.md#v1690-2022-11-09) + * **Feature**: Amazon EC2 Trn1 instances, powered by AWS Trainium chips, are purpose-built for high-performance deep learning training. u-24tb1.112xlarge and u-18tb1.112xlarge High Memory instances are purpose-built to run large in-memory databases. +* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.14.0](service/groundstation/CHANGELOG.md#v1140-2022-11-09) + * **Feature**: This release adds the preview of customer-provided ephemeris support for AWS Ground Station, allowing space vehicle owners to provide their own position and trajectory information for a satellite. +* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.19.0](service/mediapackagevod/CHANGELOG.md#v1190-2022-11-09) + * **Feature**: This release adds "IncludeIframeOnlyStream" for Dash endpoints. +* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.7.0](service/transcribestreaming/CHANGELOG.md#v170-2022-11-09) + * **Feature**: This release adds support for the hi-IN and th-TH locales + +# Release (2022-11-08) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.16.0](service/acm/CHANGELOG.md#v1160-2022-11-08) + * **Feature**: Support added for requesting elliptic curve certificate key algorithm types P-256 (EC_prime256v1) and P-384 (EC_secp384r1). +* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.1.0](service/billingconductor/CHANGELOG.md#v110-2022-11-08) + * **Feature**: This release adds the Recurring Custom Line Item feature along with a new API ListCustomLineItemVersions. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.68.0](service/ec2/CHANGELOG.md#v1680-2022-11-08) + * **Feature**: This release enables sharing of EC2 Placement Groups across accounts and within AWS Organizations using Resource Access Manager +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.20.0](service/fms/CHANGELOG.md#v1200-2022-11-08) + * **Feature**: AWS Firewall Manager now supports importing existing AWS Network Firewall firewalls into Firewall Manager policies. +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.24.0](service/lightsail/CHANGELOG.md#v1240-2022-11-08) + * **Feature**: This release adds support for Amazon Lightsail to automate the delegation of domains registered through Amazon Route 53 to Lightsail DNS management and to automate record creation for DNS validation of Lightsail SSL/TLS certificates. +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.11.0](service/opensearch/CHANGELOG.md#v1110-2022-11-08) + * **Feature**: Amazon OpenSearch Service now offers managed VPC endpoints to connect to your Amazon OpenSearch Service VPC-enabled domain in a Virtual Private Cloud (VPC).
This feature allows you to privately access your OpenSearch Service domain without using public IPs or requiring traffic to traverse the Internet. +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.19.0](service/polly/CHANGELOG.md#v1190-2022-11-08) + * **Feature**: Amazon Polly adds new voices: Elin (sv-SE), Ida (nb-NO), Laura (nl-NL) and Suvi (fi-FI). They are available as neural voices only. +* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.0.0](service/resourceexplorer2/CHANGELOG.md#v100-2022-11-08) + * **Release**: New AWS service client module + * **Feature**: This is the initial SDK release for AWS Resource Explorer. AWS Resource Explorer lets your users search for and discover your AWS resources across the AWS Regions in your account. +* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.23.0](service/route53/CHANGELOG.md#v1230-2022-11-08) + * **Feature**: Amazon Route 53 now supports the Europe (Zurich) Region (eu-central-2) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. + +# Release (2022-11-07) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.19.0](service/athena/CHANGELOG.md#v1190-2022-11-07) + * **Feature**: Adds support for using Query Result Reuse +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.24.0](service/autoscaling/CHANGELOG.md#v1240-2022-11-07) + * **Feature**: This release adds support for two new attributes for attribute-based instance type selection - NetworkBandwidthGbps and AllowedInstanceTypes. +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.20.0](service/cloudtrail/CHANGELOG.md#v1200-2022-11-07) + * **Feature**: This release includes support for configuring a delegated administrator to manage an AWS Organizations organization's CloudTrail trails and event data stores, and AWS Key Management Service encryption of CloudTrail Lake event data stores. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.67.0](service/ec2/CHANGELOG.md#v1670-2022-11-07) + * **Feature**: This release adds support for two new attributes for attribute-based instance type selection - NetworkBandwidthGbps and AllowedInstanceTypes. +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.23.0](service/elasticache/CHANGELOG.md#v1230-2022-11-07) + * **Feature**: Added support for IPv6 and dual stack for Memcached and Redis clusters. Customers can now launch new Redis and Memcached clusters with IPv6 and dual stack networking support. +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.26.0](service/lexmodelsv2/CHANGELOG.md#v1260-2022-11-07) + * **Feature**: Amazon Lex now supports new APIs for viewing and editing Custom Vocabulary in bots. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.27.0](service/mediaconvert/CHANGELOG.md#v1270-2022-11-07) + * **Feature**: The AWS Elemental MediaConvert SDK has added support for setting the SDR reference white point for HDR conversions and conversion of HDR10 to DolbyVision without mastering metadata. +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.32.0](service/ssm/CHANGELOG.md#v1320-2022-11-07) + * **Feature**: This release includes support for applying a CloudWatch alarm to multi-account, multi-region Systems Manager Automation +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.23.1](service/wafv2/CHANGELOG.md#v1231-2022-11-07) + * **Documentation**: The geo match statement now adds labels for country and region.
You can match requests at the region level by combining a geo match statement with label match statements. +* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.17.0](service/wellarchitected/CHANGELOG.md#v1170-2022-11-07) + * **Feature**: This release adds support for integrations with AWS Trusted Advisor and AWS Service Catalog AppRegistry to improve workload discovery and speed up your workload reviews. +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.25.0](service/workspaces/CHANGELOG.md#v1250-2022-11-07) + * **Feature**: This release adds the protocols attribute to the workspaces properties data type. This enables customers to migrate workspaces from PC over IP (PCoIP) to WorkSpaces Streaming Protocol (WSP) using create and modify workspaces public APIs. + +# Release (2022-11-04) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.16.1](service/cloudwatchlogs/CHANGELOG.md#v1161-2022-11-04) + * **Documentation**: Doc-only update for bug fixes and support of export to buckets encrypted with SSE-KMS +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.66.0](service/ec2/CHANGELOG.md#v1660-2022-11-04) + * **Feature**: This release adds API support for the recipient of an AMI account share to remove shared AMI launch permissions. +* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.15.0](service/emrcontainers/CHANGELOG.md#v1150-2022-11-04) + * **Feature**: Adding support for Job templates. Job templates allow you to create and store templates to configure Spark application parameters. This helps you ensure consistent settings across applications by reusing and enforcing configuration overrides in data pipelines. +* `github.com/aws/aws-sdk-go-v2/service/internal/eventstreamtesting`: [v1.0.37](service/internal/eventstreamtesting/CHANGELOG.md#v1037-2022-11-04) + * **Dependency Update**: update golang.org/x/net dependency to 0.1.0 + +# Release (2022-11-03) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.10.0](service/memorydb/CHANGELOG.md#v1100-2022-11-03) + * **Feature**: Adding support for r6gd instances for MemoryDB Redis with data tiering. In a cluster with data tiering enabled, when available memory capacity is exhausted, the least recently used data is automatically tiered to solid state drives for cost-effective capacity scaling with minimal performance impact. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.54.0](service/sagemaker/CHANGELOG.md#v1540-2022-11-03) + * **Feature**: Amazon SageMaker now supports running training jobs on ml.trn1 instance types. + +# Release (2022-11-02) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.26.0](service/iotsitewise/CHANGELOG.md#v1260-2022-11-02) + * **Feature**: This release adds the ListAssetModelProperties and ListAssetProperties APIs. You can list all properties that belong to a single asset model or asset using these two new APIs. +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.25.0](service/s3control/CHANGELOG.md#v1250-2022-11-02) + * **Feature**: S3 on Outposts launches support for Lifecycle configuration for Outposts buckets. With S3 Lifecycle configuration, you can manage objects so they are stored cost-effectively. You can manage objects using size-based rules and specify how many noncurrent versions a bucket will retain.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.53.0](service/sagemaker/CHANGELOG.md#v1530-2022-11-02) + * **Feature**: This release updates the Framework model regex for ModelPackage to support new Framework versions xgboost and sklearn. +* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.17.0](service/ssmincidents/CHANGELOG.md#v1170-2022-11-02) + * **Feature**: Adds support for tagging replication-set on creation. + +# Release (2022-11-01) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.28.0](service/rds/CHANGELOG.md#v1280-2022-11-01) + * **Feature**: Relational Database Service - This release adds support for configuring Storage Throughput on RDS database instances. +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.17.0](service/textract/CHANGELOG.md#v1170-2022-11-01) + * **Feature**: Add OCR results in AnalyzeIDResponse as blocks + +# Release (2022-10-31) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.15.0](service/apprunner/CHANGELOG.md#v1150-2022-10-31) + * **Feature**: This release adds support for private App Runner services. Services may now be configured to be made private and only accessible from a VPC. The changes include a new VpcIngressConnection resource and several new and modified APIs. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.16.0](service/cloudwatchlogs/CHANGELOG.md#v1160-2022-10-31) + * **Feature**: SDK release to support tagging for destinations and log groups with TagResource. Also supports tag on create with PutDestination. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.34.0](service/connect/CHANGELOG.md#v1340-2022-10-31) + * **Feature**: Amazon Connect now supports a new API, DismissUserContact, to dismiss or remove terminated contacts in the Agent CCP +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.65.0](service/ec2/CHANGELOG.md#v1650-2022-10-31) + * **Feature**: Elastic IP transfer is a new Amazon VPC feature that allows you to transfer your Elastic IP addresses from one AWS Account to another. +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.30.0](service/iot/CHANGELOG.md#v1300-2022-10-31) + * **Feature**: This release adds the Amazon Location action to IoT Rules Engine. +* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.15.0](service/sesv2/CHANGELOG.md#v1150-2022-10-31) + * **Feature**: This release includes support for interacting with the Virtual Deliverability Manager, allowing you to opt in/out of the feature and to retrieve recommendations and metric data. +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.16.0](service/textract/CHANGELOG.md#v1160-2022-10-31) + * **Feature**: This release introduces additional support for 30+ normalized fields such as vendor address and currency. It also includes OCR output in the response and accuracy improvements for the already supported fields in the previous version + +# Release (2022-10-28) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.14.0](service/apprunner/CHANGELOG.md#v1140-2022-10-28) + * **Feature**: AWS App Runner adds .NET 6, Go 1, PHP 8.1 and Ruby 3.1 runtimes. +* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.18.0](service/appstream/CHANGELOG.md#v1180-2022-10-28) + * **Feature**: This release includes CertificateBasedAuthProperties in CreateDirectoryConfig and UpdateDirectoryConfig.
+* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.16.20](service/cloud9/CHANGELOG.md#v11620-2022-10-28) + * **Documentation**: Update to the documentation section of the Cloud9 API Reference guide. +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.23.0](service/cloudformation/CHANGELOG.md#v1230-2022-10-28) + * **Feature**: This release adds more fields to improve visibility of AWS CloudFormation StackSets information in the following APIs: ListStackInstances, DescribeStackInstance, ListStackSetOperationResults, ListStackSetOperations, DescribeStackSetOperation. +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.19.0](service/mediatailor/CHANGELOG.md#v1190-2022-10-28) + * **Feature**: This release introduces support for SCTE-35 segmentation descriptor messages which can be sent within time signal messages. + +# Release (2022-10-27) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.64.0](service/ec2/CHANGELOG.md#v1640-2022-10-27) + * **Feature**: This feature supports the replacement of an instance root volume using an updated AMI without requiring customers to stop their instance. +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.19.0](service/fms/CHANGELOG.md#v1190-2022-10-27) + * **Feature**: Adds support for the NetworkFirewall Managed Rule Group Override flag in the GetViolationDetails API +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.34.0](service/glue/CHANGELOG.md#v1340-2022-10-27) + * **Feature**: Added support for custom data types when using custom CSV classifiers. +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.26.13](service/redshift/CHANGELOG.md#v12613-2022-10-27) + * **Documentation**: This release clarifies use for the ElasticIp parameter of the CreateCluster and RestoreFromClusterSnapshot APIs. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.52.0](service/sagemaker/CHANGELOG.md#v1520-2022-10-27) + * **Feature**: This change allows customers to provide a custom entrypoint script for the docker container to be run while executing training jobs, and provide custom arguments to the entrypoint script. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.23.0](service/wafv2/CHANGELOG.md#v1230-2022-10-27) + * **Feature**: This release adds the following: Challenge rule action, to silently verify client browsers; rule group rule action override to any valid rule action, not just Count; token sharing between protected applications for challenge/CAPTCHA token; targeted rules option for Bot Control managed rule group. + +# Release (2022-10-26) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.18.23](service/iam/CHANGELOG.md#v11823-2022-10-26) + * **Documentation**: Doc-only update that corrects instances of CLI not using an entity. +* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.18.0](service/kafka/CHANGELOG.md#v1180-2022-10-26) + * **Feature**: This release adds support for Tiered Storage. UpdateStorage allows you to control the Storage Mode for supported storage tiers. +* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.18.0](service/neptune/CHANGELOG.md#v1180-2022-10-26) + * **Feature**: Added a new cluster-level attribute to set the capacity range for Neptune Serverless instances.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.51.0](service/sagemaker/CHANGELOG.md#v1510-2022-10-26) + * **Feature**: Amazon SageMaker Automatic Model Tuning now supports specifying Grid Search strategy for tuning jobs, which evaluates all hyperparameter combinations exhaustively based on the categorical hyperparameters provided. + +# Release (2022-10-25) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.17.0](service/accessanalyzer/CHANGELOG.md#v1170-2022-10-25) + * **Feature**: This release adds support for six new resource types in IAM Access Analyzer to help you easily identify public and cross-account access to your AWS resources. Updated service API, documentation, and paginators. +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.19.3](service/location/CHANGELOG.md#v1193-2022-10-25) + * **Documentation**: Added new map styles with satellite imagery for map resources using HERE as a data provider. +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.18.0](service/mediatailor/CHANGELOG.md#v1180-2022-10-25) + * **Feature**: This release is a documentation update +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.27.0](service/rds/CHANGELOG.md#v1270-2022-10-25) + * **Feature**: Relational Database Service - This release adds support for exporting DB cluster data to Amazon S3. +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.24.0](service/workspaces/CHANGELOG.md#v1240-2022-10-25) + * **Feature**: This release adds new enums for supporting Workspaces Core features, including creating Manual running mode workspaces, importing regular Workspaces Core images and importing g4dn Workspaces Core images. + # Release (2022-10-24) ## General Highlights diff --git a/vendor/github.com/aws/aws-sdk-go-v2/Makefile b/vendor/github.com/aws/aws-sdk-go-v2/Makefile index 4b761e771ae..4bc9dfaf013 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/Makefile +++ b/vendor/github.com/aws/aws-sdk-go-v2/Makefile @@ -120,6 +120,7 @@ gen-config-asserts: gen-internal-codegen: @echo "Generating internal/codegen" cd internal/codegen \ + && go mod tidy \ && go generate gen-repo-mod-replace: diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index 41d23512a4a..6d936cd5054 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.17.1" +const goModuleVersion = "1.17.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md index 0386bcf7f48..e02d957c4a8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.18.4 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.18.3 (2022-11-22) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go index 1a1aaed58aa..44b6e16dcda 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -3,4 +3,4 @@ package config // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.18.3" +const goModuleVersion = "1.18.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md index 953ce67f3c6..613d814926f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.13.4 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.13.3 (2022-11-22) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go index 0bcacb39633..9866ca36f80 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go @@ -3,4 +3,4 @@ package credentials // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.13.3" +const goModuleVersion = "1.13.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md index 0dfb44be1af..f0ab4cd76d3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.12.20 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.12.19 (2022-10-24) * **Bug Fix**: Fixes an issue that prevented logging of the API request or responses when the respective log modes were enabled. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go index 9fc713a7cb0..4da2bd2c187 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go @@ -3,4 +3,4 @@ package imds // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.12.19" +const goModuleVersion = "1.12.20" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index ab6184058b6..41d589b3810 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.1.26 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.1.25 (2022-10-24) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index b9d5ca7faea..58b3ba7ad8d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.1.25" +const goModuleVersion = "1.1.26" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index 90e3d662d00..678f6634f2f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,7 @@ +# v2.4.20 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v2.4.19 (2022-10-24) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index d839c6d9b63..ec010e0aae3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.4.19" +const goModuleVersion = "2.4.20" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md index 2cac3297b3b..fc5b9781b56 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.3.27 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.3.26 (2022-10-24) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go 
b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go index 6d796b3100b..e4c947fecc4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go @@ -3,4 +3,4 @@ package ini // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.26" +const goModuleVersion = "1.3.27" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/modman.toml b/vendor/github.com/aws/aws-sdk-go-v2/modman.toml index d869782145f..b6d07cdd6d8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/modman.toml +++ b/vendor/github.com/aws/aws-sdk-go-v2/modman.toml @@ -1,10 +1,10 @@ [dependencies] "github.com/aws/aws-sdk-go" = "v1.44.28" - "github.com/aws/smithy-go" = "v1.13.4" + "github.com/aws/smithy-go" = "v1.13.5" "github.com/google/go-cmp" = "v0.5.8" "github.com/jmespath/go-jmespath" = "v0.4.0" - "golang.org/x/net" = "v0.0.0-20220127200216-cd36cc0744dd" + "golang.org/x/net" = "v0.1.0" [modules] diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index 89832ca1d0a..a2dfc457c1e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.9.20 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.9.19 (2022-10-24) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index c10027df605..3b99e9c4f64 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.9.19" +const goModuleVersion = "1.9.20" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md new file mode 100644 index 00000000000..12ecda41905 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md @@ -0,0 +1,230 @@ +# v1.19.2 (2022-12-07) + +* **Documentation**: Updated examples and exceptions for External Key Store (XKS). + +# v1.19.1 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.0 (2022-11-29.2) + +* **Feature**: AWS KMS introduces the External Key Store (XKS), a new feature for customers who want to protect their data with encryption keys stored in an external key management system under their control. + +# v1.18.18 (2022-11-22) + +* No change notes available for this release. + +# v1.18.17 (2022-11-16) + +* No change notes available for this release. + +# v1.18.16 (2022-11-10) + +* No change notes available for this release. 
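Several of the entries below concern KMS's signing and verification support (for example, v1.17.0 adds APIs that generate and verify HMAC codes). For orientation, here is a minimal, illustrative sketch of a signature-verification call against this module's client. It is not taken from the changelog: the key alias, the algorithm choice, the placeholder digest/signature bytes, and the environment-based config loading are all assumptions, and import paths are written as the module paths appear in this document.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// verifySignature asks KMS whether sig is a valid signature over digest,
// using an asymmetric KMS key identified by a (hypothetical) alias.
func verifySignature(ctx context.Context, client *kms.Client, digest, sig []byte) (bool, error) {
	out, err := client.Verify(ctx, &kms.VerifyInput{
		KeyId:            aws.String("alias/my-signing-key"), // hypothetical key alias
		Message:          digest,
		MessageType:      types.MessageTypeDigest, // the digest was computed locally
		Signature:        sig,
		SigningAlgorithm: types.SigningAlgorithmSpecRsassaPkcs1V15Sha256, // must match the key
	})
	if err != nil {
		return false, err
	}
	return out.SignatureValid, nil
}

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx) // region/credentials from the environment
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	// Placeholder inputs; a real caller passes a SHA-256 digest and the
	// signature bytes produced by the Sign operation.
	ok, err := verifySignature(ctx, client, []byte("placeholder-digest"), []byte("placeholder-signature"))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("signature valid: %v", ok)
}
```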
+ +# v1.18.15 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.14 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.13 (2022-10-20) + +* No change notes available for this release. + +# v1.18.12 (2022-10-13) + +* No change notes available for this release. + +# v1.18.11 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.10 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.9 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.8 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.7 (2022-08-30) + +* No change notes available for this release. + +# v1.18.6 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.5 (2022-08-22) + +* No change notes available for this release. + +# v1.18.4 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.3 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.2 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.1 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.0 (2022-07-18) + +* **Feature**: Added support for the SM2 KeySpec in China Partition Regions + +# v1.17.5 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.4 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.3 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.2 (2022-05-17) + +* **Documentation**: Add HMAC best practice tip, annual rotation of AWS managed keys. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.1 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.0 (2022-04-19) + +* **Feature**: Adds support for KMS keys and APIs that generate and verify HMAC codes + +# v1.16.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.0 (2022-02-24) + +* **Feature**: API client updated +* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttled responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2021-12-21) + +* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens. +* **Feature**: Updated to latest service endpoints + +# v1.11.1 (2021-12-02) + +* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2021-11-19) + +* **Feature**: API client updated +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2021-11-12) + +* **Feature**: Service clients now support custom endpoints that have an initial URI path defined. + +# v1.9.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Feature**: Updated service to latest API model. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-10-21) + +* **Feature**: API client updated +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-10-11) + +* **Feature**: API client updated +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-09-02) + +* **Feature**: API client updated + +# v1.5.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-06-25) + +* **Feature**: API client updated +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-06-04) + +* No change notes available for this release. + +# v1.3.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. 
+* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go new file mode 100644 index 00000000000..e56ae816b25 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go @@ -0,0 +1,434 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + smithy "github.com/aws/smithy-go" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "time" +) + +const ServiceID = "KMS" +const ServiceAPIVersion = "2014-11-01" + +// Client provides the API client to make operations call for AWS Key Management +// Service. +type Client struct { + options Options +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + resolveDefaultEndpointConfiguration(&options) + + for _, fn := range optFns { + fn(&options) + } + + client := &Client{ + options: options, + } + + return client +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. 
+	EndpointOptions EndpointResolverOptions
+
+	// The service endpoint resolver.
+	EndpointResolver EndpointResolver
+
+	// Signature Version 4 (SigV4) Signer
+	HTTPSignerV4 HTTPSignerV4
+
+	// The logger writer interface to write logging messages to.
+	Logger logging.Logger
+
+	// The region to send requests to. (Required)
+	Region string
+
+	// RetryMaxAttempts specifies the maximum number of attempts an API client will
+	// make when calling an operation that fails with a retryable error. A value of 0
+	// is ignored, and will not be used to configure the API client's default
+	// retryer, or to modify a per-operation call's retry max attempts. When creating
+	// a new API client, this member will only be used if the Retryer Options member
+	// is nil. This value will be ignored if Retryer is not nil. If specified in an
+	// operation call's functional options with a value that is different from the
+	// constructed client's Options, the Client's Retryer will be wrapped to use the
+	// operation's specific RetryMaxAttempts value.
+	RetryMaxAttempts int
+
+	// RetryMode specifies the retry mode the API client will be created with, if the
+	// Retryer option is not also specified. When creating a new API client, this
+	// member will only be used if the Retryer Options member is nil. This value will
+	// be ignored if Retryer is not nil. This currently does not support
+	// per-operation call overrides, but may in the future.
+	RetryMode aws.RetryMode
+
+	// Retryer guides how HTTP requests should be retried in case of recoverable
+	// failures. When nil the API client will use a default retryer. The kind of
+	// default retryer created by the API client can be changed with the RetryMode
+	// option.
+	Retryer aws.Retryer
+
+	// The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
+	// to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You
+	// should not populate this structure programmatically, or rely on the values here
+	// within your applications.
+	RuntimeEnvironment aws.RuntimeEnvironment
+
+	// The initial DefaultsMode used when the client options were constructed. If the
+	// DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
+	// value was at that point in time. This currently does not support per-operation
+	// call overrides, but may in the future.
+	resolvedDefaultsMode aws.DefaultsMode
+
+	// The HTTP client to invoke API calls with. Defaults to the client's default
+	// HTTP implementation if nil.
+	HTTPClient HTTPClient
+}
+
+// WithAPIOptions returns a functional option for setting the Client's APIOptions
+// option.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+	return func(o *Options) {
+		o.APIOptions = append(o.APIOptions, optFns...)
+	}
+}
+
+// WithEndpointResolver returns a functional option for setting the Client's
+// EndpointResolver option.
+func WithEndpointResolver(v EndpointResolver) func(*Options) {
+	return func(o *Options) {
+		o.EndpointResolver = v
+	}
+}
+
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// Copy creates a clone where the APIOptions list is deep copied.
+func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttemptOptions(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + return New(opts, optFns...) 
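+	// Editorial note, not upstream content: a typical construction sketch,
+	// assuming the aws-sdk-go-v2 "config" package (the same package referenced by
+	// the RuntimeEnvironment docs above); the region override shown is optional
+	// and "us-east-2" is a placeholder.
+	//
+	//	cfg, err := config.LoadDefaultConfig(context.TODO())
+	//	if err != nil {
+	//		// handle error
+	//	}
+	//	client := kms.NewFromConfig(cfg, func(o *kms.Options) {
+	//		o.Region = "us-east-2"
+	//	})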
+} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) + } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttemptOptions(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver()) +} + +func addClientUserAgent(stack *middleware.Stack) error { + return awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "kms", goModuleVersion)(stack) +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func 
newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addRetryMiddlewares(stack *middleware.Stack, o Options) error { + mo := retry.AddRetryMiddlewaresOptions{ + Retryer: o.Retryer, + LogRetryAttempts: o.ClientLogMode.IsRetries(), + } + return retry.AddRetryMiddlewares(stack, mo) +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return awshttp.AddResponseErrorMiddleware(stack) +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go new file mode 100644 index 00000000000..ee03cf109b5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go @@ -0,0 +1,144 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Cancels the deletion of a KMS key. When this operation succeeds, the key state +// of the KMS key is Disabled. To enable the KMS key, use EnableKey. For more +// information about scheduling and canceling deletion of a KMS key, see Deleting +// KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html) in +// the Key Management Service Developer Guide. The KMS key that you use for this +// operation must be in a compatible key state. For details, see Key states of KMS +// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in +// the Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account. 
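+//
+// Editorial note, not upstream content: a minimal usage sketch, assuming a
+// *Client built with NewFromConfig (see api_client.go) and the aws-sdk-go-v2
+// aws helper package; the key ID is a placeholder.
+//
+//	out, err := client.CancelKeyDeletion(context.TODO(), &kms.CancelKeyDeletionInput{
+//		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	// On success, out.KeyId holds the key ARN of the KMS key whose deletion was
+//	// canceled; the key state of the KMS key is Disabled.
+//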
+// Required permissions: kms:CancelKeyDeletion +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: ScheduleKeyDeletion +func (c *Client) CancelKeyDeletion(ctx context.Context, params *CancelKeyDeletionInput, optFns ...func(*Options)) (*CancelKeyDeletionOutput, error) { + if params == nil { + params = &CancelKeyDeletionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CancelKeyDeletion", params, optFns, c.addOperationCancelKeyDeletionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CancelKeyDeletionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CancelKeyDeletionInput struct { + + // Identifies the KMS key whose deletion is being canceled. Specify the key ID or + // key ARN of the KMS key. For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type CancelKeyDeletionOutput struct { + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the KMS key whose deletion is canceled. + KeyId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCancelKeyDeletionMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCancelKeyDeletion{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCancelKeyDeletion{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCancelKeyDeletionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCancelKeyDeletion(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCancelKeyDeletion(region string) 
*awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "CancelKeyDeletion", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go new file mode 100644 index 00000000000..19fd8cad41e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go @@ -0,0 +1,187 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Connects or reconnects a custom key store +// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// to its backing key store. For an CloudHSM key store, ConnectCustomKeyStore +// connects the key store to its associated CloudHSM cluster. For an external key +// store, ConnectCustomKeyStore connects the key store to the external key store +// proxy that communicates with your external key manager. The custom key store +// must be connected before you can create KMS keys in the key store or use the KMS +// keys it contains. You can disconnect and reconnect a custom key store at any +// time. The connection process for a custom key store can take an extended amount +// of time to complete. This operation starts the connection process, but it does +// not wait for it to complete. When it succeeds, this operation quickly returns an +// HTTP 200 response and a JSON object with no properties. However, this response +// does not indicate that the custom key store is connected. To get the connection +// state of the custom key store, use the DescribeCustomKeyStores operation. This +// operation is part of the custom key stores +// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// feature in KMS, which combines the convenience and extensive integration of KMS +// with the isolation and control of a key store that you own and manage. The +// ConnectCustomKeyStore operation might fail for various reasons. To find the +// reason, use the DescribeCustomKeyStores operation and see the +// ConnectionErrorCode in the response. For help interpreting the +// ConnectionErrorCode, see CustomKeyStoresListEntry. To fix the failure, use the +// DisconnectCustomKeyStore operation to disconnect the custom key store, correct +// the error, use the UpdateCustomKeyStore operation if necessary, and then use +// ConnectCustomKeyStore again. CloudHSM key store During the connection process +// for an CloudHSM key store, KMS finds the CloudHSM cluster that is associated +// with the custom key store, creates the connection infrastructure, connects to +// the cluster, logs into the CloudHSM client as the kmsuser CU, and rotates its +// password. To connect an CloudHSM key store, its associated CloudHSM cluster must +// have at least one active HSM. To get the number of active HSMs in a cluster, use +// the DescribeClusters +// (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) +// operation. 
To add HSMs to the cluster, use the CreateHsm +// (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) +// operation. Also, the kmsuser crypto user +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) +// (CU) must not be logged into the cluster. This prevents KMS from using this +// account to log in. If you are having trouble connecting or disconnecting a +// CloudHSM key store, see Troubleshooting an CloudHSM key store +// (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html) in the +// Key Management Service Developer Guide. External key store When you connect an +// external key store that uses public endpoint connectivity, KMS tests its ability +// to communicate with your external key manager by sending a request via the +// external key store proxy. When you connect to an external key store that uses +// VPC endpoint service connectivity, KMS establishes the networking elements that +// it needs to communicate with your external key manager via the external key +// store proxy. This includes creating an interface endpoint to the VPC endpoint +// service and a private hosted zone for traffic between KMS and the VPC endpoint +// service. To connect an external key store, KMS must be able to connect to the +// external key store proxy, the external key store proxy must be able to +// communicate with your external key manager, and the external key manager must be +// available for cryptographic operations. If you are having trouble connecting or +// disconnecting an external key store, see Troubleshooting an external key store +// (https://docs.aws.amazon.com/kms/latest/developerguide/xks-troubleshooting.html) +// in the Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a custom key store in a different Amazon Web Services +// account. Required permissions: kms:ConnectCustomKeyStore +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (IAM policy) Related operations +// +// * CreateCustomKeyStore +// +// * +// DeleteCustomKeyStore +// +// * DescribeCustomKeyStores +// +// * DisconnectCustomKeyStore +// +// * +// UpdateCustomKeyStore +func (c *Client) ConnectCustomKeyStore(ctx context.Context, params *ConnectCustomKeyStoreInput, optFns ...func(*Options)) (*ConnectCustomKeyStoreOutput, error) { + if params == nil { + params = &ConnectCustomKeyStoreInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ConnectCustomKeyStore", params, optFns, c.addOperationConnectCustomKeyStoreMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ConnectCustomKeyStoreOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ConnectCustomKeyStoreInput struct { + + // Enter the key store ID of the custom key store that you want to connect. To find + // the ID of a custom key store, use the DescribeCustomKeyStores operation. + // + // This member is required. + CustomKeyStoreId *string + + noSmithyDocumentSerde +} + +type ConnectCustomKeyStoreOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationConnectCustomKeyStoreMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpConnectCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpConnectCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpConnectCustomKeyStoreValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opConnectCustomKeyStore(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opConnectCustomKeyStore(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "ConnectCustomKeyStore", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go new file mode 100644 index 00000000000..a4618675904 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go @@ -0,0 +1,184 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a friendly name for a KMS key. Adding, deleting, or updating an alias +// can allow or deny permission to the KMS key. For details, see ABAC for KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key +// Management Service Developer Guide. You can use an alias to identify a KMS key +// in the KMS console, in the DescribeKey operation and in cryptographic operations +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations), +// such as Encrypt and GenerateDataKey. 
You can also change the KMS key that's
+// associated with the alias (UpdateAlias) or delete the alias (DeleteAlias) at
+// any time. These operations don't affect the underlying KMS key. You can
+// associate the alias with any customer managed key in the same Amazon Web
+// Services Region. Each alias is associated with only one KMS key at a time, but
+// a KMS key can have multiple aliases. A valid KMS key is required. You can't
+// create an alias without a KMS key. The alias must be unique in the account and
+// Region, but you can have aliases with the same name in different Regions. For
+// detailed information about aliases, see Using aliases
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html) in the
+// Key Management Service Developer Guide. This operation does not return a
+// response. To get the alias that you created, use the ListAliases operation.
+// The KMS key that you use for this operation must be in a compatible key state.
+// For details, see Key states of KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the
+// Key Management Service Developer Guide. Cross-account use: No. You cannot
+// perform this operation on an alias in a different Amazon Web Services account.
+// Required permissions
+//
+// * kms:CreateAlias
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// on the alias (IAM policy).
+//
+// * kms:CreateAlias
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// on the KMS key (key policy).
+//
+// For details, see Controlling access to aliases
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access)
+// in the Key Management Service Developer Guide. Related operations:
+//
+// * DeleteAlias
+//
+// * ListAliases
+//
+// * UpdateAlias
+func (c *Client) CreateAlias(ctx context.Context, params *CreateAliasInput, optFns ...func(*Options)) (*CreateAliasOutput, error) {
+	if params == nil {
+		params = &CreateAliasInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "CreateAlias", params, optFns, c.addOperationCreateAliasMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*CreateAliasOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type CreateAliasInput struct {
+
+	// Specifies the alias name. This value must begin with alias/ followed by a
+	// name, such as alias/ExampleAlias. The AliasName value must be a string of
+	// 1-256 characters. It can contain only alphanumeric characters, forward
+	// slashes (/), underscores (_), and dashes (-). The alias name cannot begin
+	// with alias/aws/. The alias/aws/ prefix is reserved for Amazon Web Services
+	// managed keys
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk).
+	//
+	// This member is required.
+	AliasName *string
+
+	// Associates the alias with the specified customer managed key
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk).
+	// The KMS key must be in the same Amazon Web Services Region. A valid key ID is
+	// required. If you supply a null or empty string value, this operation returns
+	// an error. For help finding the key ID and ARN, see Finding the Key ID and ARN
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/viewing-keys.html#find-cmk-id-arn)
+	// in the Key Management Service Developer Guide. Specify the key ID or key ARN of
+	// the KMS key.
For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key + // ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + TargetKeyId *string + + noSmithyDocumentSerde +} + +type CreateAliasOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateAliasMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateAlias{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateAlias{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateAliasValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateAlias(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateAlias(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "CreateAlias", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go new file mode 100644 index 00000000000..9e2552c3750 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go @@ -0,0 +1,322 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
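+//
+// Editorial note, not upstream content: a minimal, hypothetical sketch of
+// calling the CreateCustomKeyStore operation documented below to create a
+// CloudHSM-backed custom key store. All identifiers and credentials are
+// placeholders, and the client is assumed to be built with NewFromConfig.
+//
+//	out, err := client.CreateCustomKeyStore(context.TODO(), &kms.CreateCustomKeyStoreInput{
+//		CustomKeyStoreName:     aws.String("ExampleKeyStore"),
+//		CloudHsmClusterId:      aws.String("cluster-1a23b4cdefg"),
+//		KeyStorePassword:       aws.String("kmsPswd"),
+//		TrustAnchorCertificate: aws.String(trustAnchorCertificate),
+//	})
+//	// On success, out.CustomKeyStoreId identifies the new key store, which must
+//	// then be connected with ConnectCustomKeyStore before KMS keys can be
+//	// created in it.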
+
+package kms
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/aws-sdk-go-v2/service/kms/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Creates a custom key store
+// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+// backed by a key store that you own and manage. When you use a KMS key in a
+// custom key store for a cryptographic operation, the cryptographic operation is
+// actually performed in your key store using your keys. KMS supports CloudHSM key
+// stores
+// (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html)
+// backed by an CloudHSM cluster
+// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/clusters.html) and
+// external key stores
+// (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html)
+// backed by an external key store proxy and external key manager outside of Amazon
+// Web Services. This operation is part of the custom key stores
+// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+// feature in KMS, which combines the convenience and extensive integration of KMS
+// with the isolation and control of a key store that you own and manage. Before
+// you create the custom key store, the required elements must be in place and
+// operational. We recommend that you use the test tools that KMS provides to
+// verify the configuration of your external key store proxy. For details about
+// the required elements and verification tests, see Assemble the prerequisites
+// (for CloudHSM key stores)
+// (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
+// or Assemble the prerequisites (for external key stores)
+// (https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keystore.html#xks-requirements)
+// in the Key Management Service Developer Guide. To create a custom key store, use
+// the following parameters.
+//
+// * To create an CloudHSM key store, specify the
+// CustomKeyStoreName, CloudHsmClusterId, KeyStorePassword, and
+// TrustAnchorCertificate. The CustomKeyStoreType parameter is optional for
+// CloudHSM key stores. If you include it, set it to the default value,
+// AWS_CLOUDHSM. For help with failures, see Troubleshooting an CloudHSM key store
+// (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html) in the
+// Key Management Service Developer Guide.
+//
+// * To create an external key store,
+// specify the CustomKeyStoreName and a CustomKeyStoreType of EXTERNAL_KEY_STORE.
+// Also, specify values for XksProxyConnectivity, XksProxyAuthenticationCredential,
+// XksProxyUriEndpoint, and XksProxyUriPath. If your XksProxyConnectivity value is
+// VPC_ENDPOINT_SERVICE, specify the XksProxyVpcEndpointServiceName parameter. For
+// help with failures, see Troubleshooting an external key store
+// (https://docs.aws.amazon.com/kms/latest/developerguide/xks-troubleshooting.html)
+// in the Key Management Service Developer Guide.
+//
+// For external key stores: Some
+// external key managers provide a simpler method for creating an external key
+// store. For details, see your external key manager documentation. When creating
+// an external key store in the KMS console, you can upload a JSON-based proxy
+// configuration file with the desired values.
You cannot use a proxy configuration +// with the CreateCustomKeyStore operation. However, you can use the values in the +// file to help you determine the correct values for the CreateCustomKeyStore +// parameters. When the operation completes successfully, it returns the ID of the +// new custom key store. Before you can use your new custom key store, you need to +// use the ConnectCustomKeyStore operation to connect a new CloudHSM key store to +// its CloudHSM cluster, or to connect a new external key store to the external key +// store proxy for your external key manager. Even if you are not going to use your +// custom key store immediately, you might want to connect it to verify that all +// settings are correct and then disconnect it until you are ready to use it. For +// help with failures, see Troubleshooting a custom key store +// (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html) in the +// Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a custom key store in a different Amazon Web Services +// account. Required permissions: kms:CreateCustomKeyStore +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (IAM policy). Related operations: +// +// * ConnectCustomKeyStore +// +// * +// DeleteCustomKeyStore +// +// * DescribeCustomKeyStores +// +// * DisconnectCustomKeyStore +// +// * +// UpdateCustomKeyStore +func (c *Client) CreateCustomKeyStore(ctx context.Context, params *CreateCustomKeyStoreInput, optFns ...func(*Options)) (*CreateCustomKeyStoreOutput, error) { + if params == nil { + params = &CreateCustomKeyStoreInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateCustomKeyStore", params, optFns, c.addOperationCreateCustomKeyStoreMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateCustomKeyStoreOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateCustomKeyStoreInput struct { + + // Specifies a friendly name for the custom key store. The name must be unique in + // your Amazon Web Services account and Region. This parameter is required for all + // custom key stores. + // + // This member is required. + CustomKeyStoreName *string + + // Identifies the CloudHSM cluster for an CloudHSM key store. This parameter is + // required for custom key stores with CustomKeyStoreType of AWS_CLOUDHSM. Enter + // the cluster ID of any active CloudHSM cluster that is not already associated + // with a custom key store. To find the cluster ID, use the DescribeClusters + // (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) + // operation. + CloudHsmClusterId *string + + // Specifies the type of custom key store. The default value is AWS_CLOUDHSM. For a + // custom key store backed by an CloudHSM cluster, omit the parameter or enter + // AWS_CLOUDHSM. For a custom key store backed by an external key manager outside + // of Amazon Web Services, enter EXTERNAL_KEY_STORE. You cannot change this + // property after the key store is created. + CustomKeyStoreType types.CustomKeyStoreType + + // Specifies the kmsuser password for an CloudHSM key store. This parameter is + // required for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM. Enter + // the password of the kmsuser crypto user (CU) account + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) + // in the specified CloudHSM cluster. 
KMS logs into the cluster as this user to
+	// manage key material on your behalf. The password must be a string of 7 to 32
+	// characters. Its value is case sensitive. This parameter tells KMS the kmsuser
+	// account password; it does not change the password in the CloudHSM cluster.
+	KeyStorePassword *string
+
+	// Specifies the certificate for an CloudHSM key store. This parameter is
+	// required for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM.
+	// Enter the content of the trust anchor certificate for the CloudHSM cluster.
+	// This is the content of the customerCA.crt file that you created when you
+	// initialized the cluster
+	// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html).
+	TrustAnchorCertificate *string
+
+	// Specifies an authentication credential for the external key store proxy (XKS
+	// proxy). This parameter is required for all custom key stores with a
+	// CustomKeyStoreType of EXTERNAL_KEY_STORE. The XksProxyAuthenticationCredential
+	// has two required elements: RawSecretAccessKey, a secret key, and AccessKeyId,
+	// a unique identifier for the RawSecretAccessKey. For character requirements,
+	// see XksProxyAuthenticationCredentialType. KMS uses this authentication
+	// credential to sign requests to the external key store proxy on your behalf.
+	// This credential is unrelated to Identity and Access Management (IAM) and
+	// Amazon Web Services credentials. This parameter doesn't set or change the
+	// authentication credentials on the XKS proxy. It just tells KMS the credential
+	// that you established on your external key store proxy. If you rotate your
+	// proxy authentication credential, use the UpdateCustomKeyStore operation to
+	// provide the new credential to KMS.
+	XksProxyAuthenticationCredential *types.XksProxyAuthenticationCredentialType
+
+	// Indicates how KMS communicates with the external key store proxy. This
+	// parameter is required for custom key stores with a CustomKeyStoreType of
+	// EXTERNAL_KEY_STORE. If the external key store proxy uses a public endpoint,
+	// specify PUBLIC_ENDPOINT. If the external key store proxy uses an Amazon VPC
+	// endpoint service for communication with KMS, specify VPC_ENDPOINT_SERVICE. For
+	// help making this choice, see Choosing a connectivity option
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/plan-xks-keystore.html#choose-xks-connectivity)
+	// in the Key Management Service Developer Guide. An Amazon VPC endpoint service
+	// keeps your communication with KMS in a private address space entirely within
+	// Amazon Web Services, but it requires more configuration, including
+	// establishing an Amazon VPC with multiple subnets, a VPC endpoint service, a
+	// network load balancer, and a verified private DNS name. A public endpoint is
+	// simpler to set up, but it might be slower and might not fulfill your security
+	// requirements. You might consider testing with a public endpoint, and then
+	// establishing a VPC endpoint service for production tasks. Note that this
+	// choice does not determine the location of the external key store proxy. Even
+	// if you choose a VPC endpoint service, the proxy can be hosted within the VPC
+	// or outside of Amazon Web Services, such as in your corporate data center.
+	XksProxyConnectivity types.XksProxyConnectivityType
+
+	// Specifies the endpoint that KMS uses to send requests to the external key
+	// store proxy (XKS proxy).
This parameter is required for custom key stores with a + // CustomKeyStoreType of EXTERNAL_KEY_STORE. The protocol must be HTTPS. KMS + // communicates on port 443. Do not specify the port in the XksProxyUriEndpoint + // value. For external key stores with XksProxyConnectivity value of + // VPC_ENDPOINT_SERVICE, specify https:// followed by the private DNS name of the + // VPC endpoint service. For external key stores with PUBLIC_ENDPOINT connectivity, + // this endpoint must be reachable before you create the custom key store. KMS + // connects to the external key store proxy while creating the custom key store. + // For external key stores with VPC_ENDPOINT_SERVICE connectivity, KMS connects + // when you call the ConnectCustomKeyStore operation. The value of this parameter + // must begin with https://. The remainder can contain upper and lower case letters + // (A-Z and a-z), numbers (0-9), dots (.), and hyphens (-). Additional slashes (/ + // and \) are not permitted. Uniqueness requirements: + // + // * The combined + // XksProxyUriEndpoint and XksProxyUriPath values must be unique in the Amazon Web + // Services account and Region. + // + // * An external key store with PUBLIC_ENDPOINT + // connectivity cannot use the same XksProxyUriEndpoint value as an external key + // store with VPC_ENDPOINT_SERVICE connectivity in the same Amazon Web Services + // Region. + // + // * Each external key store with VPC_ENDPOINT_SERVICE connectivity must + // have its own private DNS name. The XksProxyUriEndpoint value for external key + // stores with VPC_ENDPOINT_SERVICE connectivity (private DNS name) must be unique + // in the Amazon Web Services account and Region. + XksProxyUriEndpoint *string + + // Specifies the base path to the proxy APIs for this external key store. To find + // this value, see the documentation for your external key store proxy. This + // parameter is required for all custom key stores with a CustomKeyStoreType of + // EXTERNAL_KEY_STORE. The value must start with / and must end with /kms/xks/v1 + // where v1 represents the version of the KMS external key store proxy API. This + // path can include an optional prefix between the required elements such as + // /prefix/kms/xks/v1. Uniqueness requirements: + // + // * The combined XksProxyUriEndpoint + // and XksProxyUriPath values must be unique in the Amazon Web Services account and + // Region. + XksProxyUriPath *string + + // Specifies the name of the Amazon VPC endpoint service for interface endpoints + // that is used to communicate with your external key store proxy (XKS proxy). This + // parameter is required when the value of CustomKeyStoreType is EXTERNAL_KEY_STORE + // and the value of XksProxyConnectivity is VPC_ENDPOINT_SERVICE. The Amazon VPC + // endpoint service must fulfill all requirements + // (https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keystore.html#xks-requirements) + // for use with an external key store. Uniqueness requirements: + // + // * External key + // stores with VPC_ENDPOINT_SERVICE connectivity can share an Amazon VPC, but each + // external key store must have its own VPC endpoint service and private DNS name. + XksProxyVpcEndpointServiceName *string + + noSmithyDocumentSerde +} + +type CreateCustomKeyStoreOutput struct { + + // A unique identifier for the new custom key store. + CustomKeyStoreId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateCustomKeyStoreMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateCustomKeyStoreValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateCustomKeyStore(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateCustomKeyStore(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "CreateCustomKeyStore", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go new file mode 100644 index 00000000000..06c4854d3af --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go @@ -0,0 +1,277 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Adds a grant to a KMS key. A grant is a policy instrument that allows Amazon Web +// Services principals to use KMS keys in cryptographic operations. It also can +// allow them to view a KMS key (DescribeKey) and create and manage grants. When +// authorizing access to a KMS key, grants are considered along with key policies +// and IAM policies. Grants are often used for temporary permissions because you +// can create one, use its permissions, and delete it without changing your key +// policies or IAM policies. 
For detailed information about grants, including grant +// terminology, see Grants in KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) in the Key +// Management Service Developer Guide . For examples of working with grants in +// several programming languages, see Programming grants +// (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html). +// The CreateGrant operation returns a GrantToken and a GrantId. +// +// * When you +// create, retire, or revoke a grant, there might be a brief delay, usually less +// than five minutes, until the grant is available throughout KMS. This state is +// known as eventual consistency. Once the grant has achieved eventual consistency, +// the grantee principal can use the permissions in the grant without identifying +// the grant. However, to use the permissions in the grant immediately, use the +// GrantToken that CreateGrant returns. For details, see Using a grant token +// (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) +// in the Key Management Service Developer Guide . +// +// * The CreateGrant operation +// also returns a GrantId. You can use the GrantId and a key identifier to identify +// the grant in the RetireGrant and RevokeGrant operations. To find the grant ID, +// use the ListGrants or ListRetirableGrants operations. +// +// The KMS key that you use +// for this operation must be in a compatible key state. For details, see Key +// states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: Yes. To perform this +// operation on a KMS key in a different Amazon Web Services account, specify the +// key ARN in the value of the KeyId parameter. Required permissions: +// kms:CreateGrant +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * ListGrants +// +// * ListRetirableGrants +// +// * +// RetireGrant +// +// * RevokeGrant +func (c *Client) CreateGrant(ctx context.Context, params *CreateGrantInput, optFns ...func(*Options)) (*CreateGrantOutput, error) { + if params == nil { + params = &CreateGrantInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateGrant", params, optFns, c.addOperationCreateGrantMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateGrantOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateGrantInput struct { + + // The identity that gets the permissions specified in the grant. To specify the + // principal, use the Amazon Resource Name (ARN) + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of + // an Amazon Web Services principal. Valid Amazon Web Services principals include + // Amazon Web Services accounts (root), IAM users, IAM roles, federated users, and + // assumed role users. For examples of the ARN syntax to use for specifying a + // principal, see Amazon Web Services Identity and Access Management (IAM) + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) + // in the Example ARNs section of the Amazon Web Services General Reference. + // + // This member is required. + GranteePrincipal *string + + // Identifies the KMS key for the grant. The grant gives principals permission to + // use this KMS key. Specify the key ID or key ARN of the KMS key. 
To specify a KMS + // key in a different Amazon Web Services account, you must use the key ARN. For + // example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // A list of operations that the grant permits. This list must include only + // operations that are permitted in a grant. Also, the operation must be supported + // on the KMS key. For example, you cannot create a grant for a symmetric + // encryption KMS key that allows the Sign operation, or a grant for an asymmetric + // KMS key that allows the GenerateDataKey operation. If you try, KMS returns a + // ValidationError exception. For details, see Grant operations + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations) + // in the Key Management Service Developer Guide. + // + // This member is required. + Operations []types.GrantOperation + + // Specifies a grant constraint. KMS supports the EncryptionContextEquals and + // EncryptionContextSubset grant constraints. Each constraint value can include up + // to 8 encryption context pairs. The encryption context value in each constraint + // cannot exceed 384 characters. For information about grant constraints, see Using + // grant constraints + // (https://docs.aws.amazon.com/kms/latest/developerguide/create-grant-overview.html#grant-constraints) + // in the Key Management Service Developer Guide. For more information about + // encryption context, see Encryption context + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // in the Key Management Service Developer Guide . The encryption context grant + // constraints allow the permissions in the grant only when the encryption context + // in the request matches (EncryptionContextEquals) or includes + // (EncryptionContextSubset) the encryption context specified in this structure. + // The encryption context grant constraints are supported only on grant operations + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations) + // that include an EncryptionContext parameter, such as cryptographic operations on + // symmetric encryption KMS keys. Grants with grant constraints can include the + // DescribeKey and RetireGrant operations, but the constraint doesn't apply to + // these operations. If a grant with a grant constraint includes the CreateGrant + // operation, the constraint requires that any grants created with the CreateGrant + // permission have an equally strict or stricter encryption context constraint. You + // cannot use an encryption context grant constraint for cryptographic operations + // with asymmetric KMS keys or HMAC KMS keys. These keys don't support an + // encryption context. + Constraints *types.GrantConstraints + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. + // For more information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantTokens []string + + // A friendly name for the grant. 
Use this value to prevent the unintended creation + // of duplicate grants when retrying this request. When this value is absent, all + // CreateGrant requests result in a new grant with a unique GrantId even if all the + // supplied parameters are identical. This can result in unintended duplicates when + // you retry the CreateGrant request. When this value is present, you can retry a + // CreateGrant request with identical parameters; if the grant already exists, the + // original GrantId is returned without creating a new grant. Note that the + // returned grant token is unique with every CreateGrant request, even when a + // duplicate GrantId is returned. All grant tokens for the same grant ID can be + // used interchangeably. + Name *string + + // The principal that has permission to use the RetireGrant operation to retire the + // grant. To specify the principal, use the Amazon Resource Name (ARN) + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of + // an Amazon Web Services principal. Valid Amazon Web Services principals include + // Amazon Web Services accounts (root), IAM users, federated users, and assumed + // role users. For examples of the ARN syntax to use for specifying a principal, + // see Amazon Web Services Identity and Access Management (IAM) + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) + // in the Example ARNs section of the Amazon Web Services General Reference. The + // grant determines the retiring principal. Other principals might have permission + // to retire the grant or revoke the grant. For details, see RevokeGrant and + // Retiring and revoking grants + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete) + // in the Key Management Service Developer Guide. + RetiringPrincipal *string + + noSmithyDocumentSerde +} + +type CreateGrantOutput struct { + + // The unique identifier for the grant. You can use the GrantId in a ListGrants, + // RetireGrant, or RevokeGrant operation. + GrantId *string + + // The grant token. Use a grant token when your permission to call this operation + // comes from a new grant that has not yet achieved eventual consistency. For more + // information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateGrantMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateGrant{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateGrant{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateGrantValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateGrant(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateGrant(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "CreateGrant", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go new file mode 100644 index 00000000000..0227caf69f4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go @@ -0,0 +1,505 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a unique customer managed KMS key +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms-keys) +// in your Amazon Web Services account and Region. You can use a KMS key in +// cryptographic operations, such as encryption and signing. Some Amazon Web +// Services services let you use KMS keys that you create and manage to protect +// your service resources. A KMS key is a logical representation of a cryptographic +// key. 
In addition to the key material used in cryptographic operations, a KMS key +// includes metadata, such as the key ID, key policy, creation date, description, +// and key state. For details, see Managing keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/getting-started.html) in +// the Key Management Service Developer Guide. Use the parameters of CreateKey to +// specify the type of KMS key, the source of its key material, its key policy, +// description, tags, and other properties. KMS has replaced the term customer +// master key (CMK) with KMS key and KMS keys. The concept has not changed. To +// prevent breaking changes, KMS is keeping some variations of this term. To create +// different types of KMS keys, use the following guidance: Symmetric encryption +// KMS key By default, CreateKey creates a symmetric encryption KMS key with key +// material that KMS generates. This is the basic and most widely used type of KMS +// key, and provides the best performance. To create a symmetric encryption KMS +// key, you don't need to specify any parameters. The default value for KeySpec, +// SYMMETRIC_DEFAULT, the default value for KeyUsage, ENCRYPT_DECRYPT, and the +// default value for Origin, AWS_KMS, create a symmetric encryption KMS key with +// KMS key material. If you need a key for basic encryption and decryption or you +// are creating a KMS key to protect your resources in an Amazon Web Services +// service, create a symmetric encryption KMS key. The key material in a symmetric +// encryption key never leaves KMS unencrypted. You can use a symmetric encryption +// KMS key to encrypt and decrypt data up to 4,096 bytes, but symmetric keys are +// typically used to generate data keys and data key pairs. For details, see +// GenerateDataKey and GenerateDataKeyPair. Asymmetric KMS keys To create an +// asymmetric KMS key, use the KeySpec parameter to specify the type of key +// material in the KMS key. Then, use the KeyUsage parameter to determine whether +// the KMS key will be used to encrypt and decrypt or sign and verify. You can't +// change these properties after the KMS key is created. Asymmetric KMS keys +// contain an RSA key pair, an Elliptic Curve (ECC) key pair, or an SM2 key pair +// (China Regions only). The private key in an asymmetric KMS key never leaves KMS +// unencrypted. However, you can use the GetPublicKey operation to download the +// public key so it can be used outside of KMS. KMS keys with RSA or SM2 key pairs +// can be used to encrypt or decrypt data or sign and verify messages (but not +// both). KMS keys with ECC key pairs can be used only to sign and verify +// messages. For information about asymmetric KMS keys, see Asymmetric KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) +// in the Key Management Service Developer Guide. HMAC KMS key To create an HMAC +// KMS key, set the KeySpec parameter to a key spec value for HMAC KMS keys. Then +// set the KeyUsage parameter to GENERATE_VERIFY_MAC. You must set the key usage +// even though GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS +// keys. You can't change these properties after the KMS key is created. HMAC KMS +// keys are symmetric keys that never leave KMS unencrypted. You can use HMAC keys +// to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to +// 4,096 bytes. HMAC KMS keys are not supported in all Amazon Web Services Regions.
+ // If you try to create an HMAC KMS key in an Amazon Web Services Region in which +// HMAC keys are not supported, the CreateKey operation returns an +// UnsupportedOperationException. For a list of Regions in which HMAC KMS keys are +// supported, see HMAC keys in KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) in the Key +// Management Service Developer Guide. Multi-Region primary keys To create a +// multi-Region primary key in the local Amazon Web Services Region, use the +// MultiRegion parameter with a value of True. To create a multi-Region replica +// key, that is, a KMS key with the same key ID and key material as a primary key, +// but in a different Amazon Web Services Region, use the ReplicateKey operation. +// To change a replica key to a primary key, and its primary key to a replica key, +// use the UpdatePrimaryRegion operation. You can create multi-Region KMS keys for +// all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, +// asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also +// create multi-Region keys with imported key material. However, you can't create +// multi-Region keys in a custom key store. This operation supports multi-Region +// keys, a KMS feature that lets you create multiple interoperable KMS keys in +// different Amazon Web Services Regions. Because these KMS keys have the same key +// ID, key material, and other metadata, you can use them interchangeably to +// encrypt data in one Amazon Web Services Region and decrypt it in a different +// Amazon Web Services Region without re-encrypting the data or making a +// cross-Region call. For more information about multi-Region keys, see +// Multi-Region keys in KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) +// in the Key Management Service Developer Guide. Imported key material To import +// your own key material into a KMS key, begin by creating a symmetric encryption +// KMS key with no key material. To do this, use the Origin parameter of CreateKey +// with a value of EXTERNAL. Next, use the GetParametersForImport operation to get +// a public key and import token, and use the public key to encrypt your key +// material. Then, use ImportKeyMaterial with your import token to import the key +// material. For step-by-step instructions, see Importing Key Material +// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) in +// the Key Management Service Developer Guide. This feature supports only +// symmetric encryption KMS keys, including multi-Region symmetric encryption KMS +// keys. You cannot import key material into any other type of KMS key. To create a +// multi-Region primary key with imported key material, use the Origin parameter of +// CreateKey with a value of EXTERNAL and the MultiRegion parameter with a value of +// True. To create replicas of the multi-Region primary key, use the ReplicateKey +// operation. For instructions, see Importing key material into multi-Region keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-import.html). +// For more information about multi-Region keys, see Multi-Region keys in KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) +// in the Key Management Service Developer Guide.
Custom key store A custom key +// store +// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// lets you protect your Amazon Web Services resources using keys in a backing key +// store that you own and manage. When you request a cryptographic operation with a +// KMS key in a custom key store, the operation is performed in the backing key +// store using its cryptographic keys. KMS supports CloudHSM key stores +// (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html) +// backed by an CloudHSM cluster and external key stores +// (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html) +// backed by an external key manager outside of Amazon Web Services. When you +// create a KMS key in an CloudHSM key store, KMS generates an encryption key in +// the CloudHSM cluster and associates it with the KMS key. When you create a KMS +// key in an external key store, you specify an existing encryption key in the +// external key manager. Some external key managers provide a simpler method for +// creating a KMS key in an external key store. For details, see your external key +// manager documentation. Before you create a KMS key in a custom key store, the +// ConnectionState of the key store must be CONNECTED. To connect the custom key +// store, use the ConnectCustomKeyStore operation. To find the ConnectionState, use +// the DescribeCustomKeyStores operation. To create a KMS key in a custom key +// store, use the CustomKeyStoreId. Use the default KeySpec value, +// SYMMETRIC_DEFAULT, and the default KeyUsage value, ENCRYPT_DECRYPT to create a +// symmetric encryption key. No other key type is supported in a custom key store. +// To create a KMS key in an CloudHSM key store +// (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html), +// use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster that +// is associated with the custom key store must have at least two active HSMs in +// different Availability Zones in the Amazon Web Services Region. To create a KMS +// key in an external key store +// (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html), +// use the Origin parameter with a value of EXTERNAL_KEY_STORE and an XksKeyId +// parameter that identifies an existing external key. Some external key managers +// provide a simpler method for creating a KMS key in an external key store. For +// details, see your external key manager documentation. Cross-account use: No. You +// cannot use this operation to create a KMS key in a different Amazon Web Services +// account. Required permissions: kms:CreateKey +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (IAM policy). To use the Tags parameter, kms:TagResource +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (IAM policy). For examples and information about related permissions, see Allow +// a user to create KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/iam-policies.html#iam-policy-example-create-key) +// in the Key Management Service Developer Guide. 
Related operations: +// +// * +// DescribeKey +// +// * ListKeys +// +// * ScheduleKeyDeletion +func (c *Client) CreateKey(ctx context.Context, params *CreateKeyInput, optFns ...func(*Options)) (*CreateKeyOutput, error) { + if params == nil { + params = &CreateKeyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateKey", params, optFns, c.addOperationCreateKeyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateKeyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateKeyInput struct { + + // A flag to indicate whether to bypass the key policy lockout safety check. + // Setting this value to true increases the risk that the KMS key becomes + // unmanageable. Do not set this value to true indiscriminately. For more + // information, refer to the scenario in the Default Key Policy + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + // section in the Key Management Service Developer Guide . Use this parameter only + // when you include a policy in the request and you intend to prevent the principal + // that is making the request from making a subsequent PutKeyPolicy request on the + // KMS key. The default value is false. + BypassPolicyLockoutSafetyCheck bool + + // Creates the KMS key in the specified custom key store + // (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). + // The ConnectionState of the custom key store must be CONNECTED. To find the + // CustomKeyStoreID and ConnectionState use the DescribeCustomKeyStores operation. + // This parameter is valid only for symmetric encryption KMS keys in a single + // Region. You cannot create any other type of KMS key in a custom key store. When + // you create a KMS key in an CloudHSM key store, KMS generates a non-exportable + // 256-bit symmetric key in its associated CloudHSM cluster and associates it with + // the KMS key. When you create a KMS key in an external key store, you must use + // the XksKeyId parameter to specify an external key that serves as key material + // for the KMS key. + CustomKeyStoreId *string + + // Instead, use the KeySpec parameter. The KeySpec and CustomerMasterKeySpec + // parameters work the same way. Only the names differ. We recommend that you use + // KeySpec parameter in your code. However, to avoid breaking changes, KMS supports + // both parameters. + // + // Deprecated: This parameter has been deprecated. Instead, use the KeySpec + // parameter. + CustomerMasterKeySpec types.CustomerMasterKeySpec + + // A description of the KMS key. Use a description that helps you decide whether + // the KMS key is appropriate for a task. The default value is an empty string (no + // description). To set or change the description after the key is created, use + // UpdateKeyDescription. + Description *string + + // Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT, + // creates a KMS key with a 256-bit AES-GCM key that is used for encryption and + // decryption, except in China Regions, where it creates a 128-bit symmetric key + // that uses SM4 encryption. For help choosing a key spec for your KMS key, see + // Choosing a KMS key type + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-types.html#symm-asymm-choose) + // in the Key Management Service Developer Guide . The KeySpec determines whether + // the KMS key contains a symmetric key or an asymmetric key pair. 
It also + // determines the algorithms that the KMS key supports. You can't change the + // KeySpec after the KMS key is created. To further restrict the algorithms that + // can be used with the KMS key, use a condition key in its key policy or IAM + // policy. For more information, see kms:EncryptionAlgorithm + // (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-algorithm), + // kms:MacAlgorithm + // (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-mac-algorithm) + // or kms:Signing Algorithm + // (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-signing-algorithm) + // in the Key Management Service Developer Guide . Amazon Web Services services + // that are integrated with KMS + // (http://aws.amazon.com/kms/features/#AWS_Service_Integration) use symmetric + // encryption KMS keys to protect your data. These services do not support + // asymmetric KMS keys or HMAC KMS keys. KMS supports the following key specs for + // KMS keys: + // + // * Symmetric encryption key (default) + // + // * SYMMETRIC_DEFAULT + // + // * HMAC + // keys (symmetric) + // + // * HMAC_224 + // + // * HMAC_256 + // + // * HMAC_384 + // + // * HMAC_512 + // + // * Asymmetric + // RSA key pairs + // + // * RSA_2048 + // + // * RSA_3072 + // + // * RSA_4096 + // + // * Asymmetric NIST-recommended + // elliptic curve key pairs + // + // * ECC_NIST_P256 (secp256r1) + // + // * ECC_NIST_P384 + // (secp384r1) + // + // * ECC_NIST_P521 (secp521r1) + // + // * Other asymmetric elliptic curve key + // pairs + // + // * ECC_SECG_P256K1 (secp256k1), commonly used for cryptocurrencies. + // + // * SM2 + // key pairs (China Regions only) + // + // * SM2 + KeySpec types.KeySpec + + // Determines the cryptographic operations + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // for which you can use the KMS key. The default value is ENCRYPT_DECRYPT. This + // parameter is optional when you are creating a symmetric encryption KMS key; + // otherwise, it is required. You can't change the KeyUsage value after the KMS key + // is created. Select only one valid value. + // + // * For symmetric encryption KMS keys, + // omit the parameter or specify ENCRYPT_DECRYPT. + // + // * For HMAC KMS keys (symmetric), + // specify GENERATE_VERIFY_MAC. + // + // * For asymmetric KMS keys with RSA key material, + // specify ENCRYPT_DECRYPT or SIGN_VERIFY. + // + // * For asymmetric KMS keys with ECC key + // material, specify SIGN_VERIFY. + // + // * For asymmetric KMS keys with SM2 key material + // (China Regions only), specify ENCRYPT_DECRYPT or SIGN_VERIFY. + KeyUsage types.KeyUsageType + + // Creates a multi-Region primary key that you can replicate into other Amazon Web + // Services Regions. You cannot change this value after you create the KMS key. For + // a multi-Region key, set this parameter to True. For a single-Region KMS key, + // omit this parameter or set it to False. The default value is False. This + // operation supports multi-Region keys, an KMS feature that lets you create + // multiple interoperable KMS keys in different Amazon Web Services Regions. 
+ // Because these KMS keys have the same key ID, key material, and other metadata, + // you can use them interchangeably to encrypt data in one Amazon Web Services + // Region and decrypt it in a different Amazon Web Services Region without + // re-encrypting the data or making a cross-Region call. For more information about + // multi-Region keys, see Multi-Region keys in KMS + // (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) + // in the Key Management Service Developer Guide. This value creates a primary key, + // not a replica. To create a replica key, use the ReplicateKey operation. You can + // create a symmetric or asymmetric multi-Region key, and you can create a + // multi-Region key with imported key material. However, you cannot create a + // multi-Region key in a custom key store. + MultiRegion *bool + + // The source of the key material for the KMS key. You cannot change the origin + // after you create the KMS key. The default is AWS_KMS, which means that KMS + // creates the key material. To create a KMS key with no key material + // (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys-create-cmk.html) + // (for imported key material), set this value to EXTERNAL. For more information + // about importing key material into KMS, see Importing Key Material + // (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) in + // the Key Management Service Developer Guide. The EXTERNAL origin value is valid + // only for symmetric KMS keys. To create a KMS key in an CloudHSM key store + // (https://docs.aws.amazon.com/kms/latest/developerguide/create-cmk-keystore.html) + // and create its key material in the associated CloudHSM cluster, set this value + // to AWS_CLOUDHSM. You must also use the CustomKeyStoreId parameter to identify + // the CloudHSM key store. The KeySpec value must be SYMMETRIC_DEFAULT. To create a + // KMS key in an external key store + // (https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keys.html), + // set this value to EXTERNAL_KEY_STORE. You must also use the CustomKeyStoreId + // parameter to identify the external key store and the XksKeyId parameter to + // identify the associated external key. The KeySpec value must be + // SYMMETRIC_DEFAULT. + Origin types.OriginType + + // The key policy to attach to the KMS key. If you provide a key policy, it must + // meet the following criteria: + // + // * If you don't set BypassPolicyLockoutSafetyCheck + // to true, the key policy must allow the principal that is making the CreateKey + // request to make a subsequent PutKeyPolicy request on the KMS key. This reduces + // the risk that the KMS key becomes unmanageable. For more information, refer to + // the scenario in the Default Key Policy + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + // section of the Key Management Service Developer Guide . + // + // * Each statement in the + // key policy must contain one or more principals. The principals in the key policy + // must exist and be visible to KMS. When you create a new Amazon Web Services + // principal (for example, an IAM user or role), you might need to enforce a delay + // before including the new principal in a key policy because the new principal + // might not be immediately visible to KMS. 
For more information, see Changes that + // I make are not always immediately visible + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) + // in the Amazon Web Services Identity and Access Management User Guide. + // + // If you do + // not provide a key policy, KMS attaches a default key policy to the KMS key. For + // more information, see Default Key Policy + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) + // in the Key Management Service Developer Guide. The key policy size quota is 32 + // kilobytes (32768 bytes). For help writing and formatting a JSON policy document, + // see the IAM JSON Policy Reference + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) in + // the Identity and Access Management User Guide . + Policy *string + + // Assigns one or more tags to the KMS key. Use this parameter to tag the KMS key + // when it is created. To tag an existing KMS key, use the TagResource operation. + // Tagging or untagging a KMS key can allow or deny permission to the KMS key. For + // details, see ABAC for KMS + // (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key + // Management Service Developer Guide. To use this parameter, you must have + // kms:TagResource + // (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) + // permission in an IAM policy. Each tag consists of a tag key and a tag value. + // Both the tag key and the tag value are required, but the tag value can be an + // empty (null) string. You cannot have more than one tag on a KMS key with the + // same tag key. If you specify an existing tag key with a different tag value, KMS + // replaces the current tag value with the specified one. When you add tags to an + // Amazon Web Services resource, Amazon Web Services generates a cost allocation + // report with usage and costs aggregated by tags. Tags can also be used to control + // access to a KMS key. For details, see Tagging Keys + // (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). + Tags []types.Tag + + // Identifies the external key + // (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key) + // that serves as key material for the KMS key in an external key store + // (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html). + // Specify the ID that the external key store proxy + // (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-xks-proxy) + // uses to refer to the external key. For help, see the documentation for your + // external key store proxy. This parameter is required for a KMS key with an + // Origin value of EXTERNAL_KEY_STORE. It is not valid for KMS keys with any other + // Origin value. The external key must be an existing 256-bit AES symmetric + // encryption key hosted outside of Amazon Web Services in an external key manager + // associated with the external key store specified by the CustomKeyStoreId + // parameter. This key must be enabled and configured to perform encryption and + // decryption. Each KMS key in an external key store must use a different external + // key. For details, see Requirements for a KMS key in an external key store + // (https://docs.aws.amazon.com/create-xks-keys.html#xks-key-requirements) in the + // Key Management Service Developer Guide. 
Each KMS key in an external key store is +// associated with two backing keys. One is key material that KMS generates. The other +// is the external key specified by this parameter. When you use the KMS key in an +// external key store to encrypt data, the encryption operation is performed first +// by KMS using the KMS key material, and then by the external key manager using +// the specified external key, a process known as double encryption. For details, +// see Double encryption +// (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-double-encryption) +// in the Key Management Service Developer Guide. + XksKeyId *string + + noSmithyDocumentSerde +} + +type CreateKeyOutput struct { + + // Metadata associated with the KMS key. + KeyMetadata *types.KeyMetadata + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateKeyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateKey{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateKey{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateKeyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateKey(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateKey(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "CreateKey", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go new file mode 100644 index 00000000000..102d861079d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go @@ -0,0 +1,262 @@ +// Code generated by smithy-go-codegen DO NOT EDIT.
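To illustrate the CreateKeyInput knobs documented above, here is a minimal sketch that creates the three key families this package distinguishes: symmetric encryption (all defaults), asymmetric signing, and HMAC. Illustrative only, not part of this change: the description and tag values are placeholders, and the imports use the canonical module paths rather than the vendored github.com mirrors.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load config: %v", err)
	}
	client := kms.NewFromConfig(cfg)

	// Symmetric encryption key: every parameter can be omitted, because the
	// defaults are KeySpec=SYMMETRIC_DEFAULT, KeyUsage=ENCRYPT_DECRYPT, Origin=AWS_KMS.
	sym, err := client.CreateKey(ctx, &kms.CreateKeyInput{
		Description: aws.String("example symmetric encryption key"),
		Tags: []types.Tag{
			{TagKey: aws.String("env"), TagValue: aws.String("dev")},
		},
	})
	if err != nil {
		log.Fatalf("CreateKey (symmetric): %v", err)
	}
	fmt.Println("symmetric key:", aws.ToString(sym.KeyMetadata.Arn))

	// Asymmetric signing key: KeySpec selects the key material, and KeyUsage
	// must be set because ENCRYPT_DECRYPT is not valid for ECC key pairs.
	signing, err := client.CreateKey(ctx, &kms.CreateKeyInput{
		KeySpec:  types.KeySpecEccNistP256,
		KeyUsage: types.KeyUsageTypeSignVerify,
	})
	if err != nil {
		log.Fatalf("CreateKey (signing): %v", err)
	}
	fmt.Println("signing key:", aws.ToString(signing.KeyMetadata.Arn))

	// HMAC key: GENERATE_VERIFY_MAC is the only valid usage for HMAC key specs,
	// but it still must be specified explicitly.
	mac, err := client.CreateKey(ctx, &kms.CreateKeyInput{
		KeySpec:  types.KeySpecHmac256,
		KeyUsage: types.KeyUsageTypeGenerateVerifyMac,
	})
	if err != nil {
		log.Fatalf("CreateKey (HMAC): %v", err)
	}
	fmt.Println("HMAC key:", aws.ToString(mac.KeyMetadata.Arn))
}
```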
+ +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Decrypts ciphertext that was encrypted by a KMS key using any of the following +// operations: +// +// * Encrypt +// +// * GenerateDataKey +// +// * GenerateDataKeyPair +// +// * +// GenerateDataKeyWithoutPlaintext +// +// * GenerateDataKeyPairWithoutPlaintext +// +// You can +// use this operation to decrypt ciphertext that was encrypted under a symmetric +// encryption KMS key or an asymmetric encryption KMS key. When the KMS key is +// asymmetric, you must specify the KMS key and the encryption algorithm that was +// used to encrypt the ciphertext. For information about asymmetric KMS keys, see +// Asymmetric KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) +// in the Key Management Service Developer Guide. The Decrypt operation also +// decrypts ciphertext that was encrypted outside of KMS by the public key in an +// asymmetric KMS key. However, it cannot decrypt symmetric ciphertext produced +// by other libraries, such as the Amazon Web Services Encryption SDK +// (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/) or Amazon +// S3 client-side encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html). +// These libraries return a ciphertext format that is incompatible with KMS. If the +// ciphertext was encrypted under a symmetric encryption KMS key, the KeyId +// parameter is optional. KMS can get this information from metadata that it adds +// to the symmetric ciphertext blob. This feature adds durability to your +// implementation by ensuring that authorized users can decrypt ciphertext decades +// after it was encrypted, even if they've lost track of the key ID. However, +// specifying the KMS key is always recommended as a best practice. When you use +// the KeyId parameter to specify a KMS key, KMS only uses the KMS key you specify. +// If the ciphertext was encrypted under a different KMS key, the Decrypt operation +// fails. This practice ensures that you use the KMS key that you intend. Whenever +// possible, use key policies to give users permission to call the Decrypt +// operation on a particular KMS key, instead of using IAM policies. Otherwise, you +// might create an IAM user policy that gives the user Decrypt permission on all +// KMS keys. This user could decrypt ciphertext that was encrypted by KMS keys in +// other accounts if the key policy for the cross-account KMS key permits it. If +// you must use an IAM policy for Decrypt permissions, limit the user to particular +// KMS keys or particular trusted accounts. For details, see Best practices for IAM +// policies +// (https://docs.aws.amazon.com/kms/latest/developerguide/iam-policies.html#iam-policies-best-practices) +// in the Key Management Service Developer Guide. Applications in Amazon Web +// Services Nitro Enclaves can call this operation by using the Amazon Web Services +// Nitro Enclaves Development Kit +// (https://github.com/aws/aws-nitro-enclaves-sdk-c).
For information about the +// supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) +// in the Key Management Service Developer Guide. The KMS key that you use for this +// operation must be in a compatible key state. For details, see Key states of KMS +// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in +// the Key Management Service Developer Guide. Cross-account use: Yes. To perform +// this operation with a KMS key in a different Amazon Web Services account, +// specify the key ARN or alias ARN in the value of the KeyId parameter. Required +// permissions: kms:Decrypt +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * Encrypt +// +// * GenerateDataKey +// +// * +// GenerateDataKeyPair +// +// * ReEncrypt +func (c *Client) Decrypt(ctx context.Context, params *DecryptInput, optFns ...func(*Options)) (*DecryptOutput, error) { + if params == nil { + params = &DecryptInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "Decrypt", params, optFns, c.addOperationDecryptMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DecryptOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DecryptInput struct { + + // Ciphertext to be decrypted. The blob includes metadata. + // + // This member is required. + CiphertextBlob []byte + + // Specifies the encryption algorithm that will be used to decrypt the ciphertext. + // Specify the same algorithm that was used to encrypt the data. If you specify a + // different algorithm, the Decrypt operation fails. This parameter is required + // only when the ciphertext was encrypted under an asymmetric KMS key. The default + // value, SYMMETRIC_DEFAULT, represents the only supported algorithm that is valid + // for symmetric encryption KMS keys. + EncryptionAlgorithm types.EncryptionAlgorithmSpec + + // Specifies the encryption context to use when decrypting the data. An encryption + // context is valid only for cryptographic operations + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // with a symmetric encryption KMS key. The standard asymmetric encryption + // algorithms and HMAC algorithms that KMS uses do not support an encryption + // context. An encryption context is a collection of non-secret key-value pairs + // that represent additional authenticated data. When you use an encryption context + // to encrypt data, you must specify the same (an exact case-sensitive match) + // encryption context to decrypt the data. An encryption context is supported only + // on operations with symmetric encryption KMS keys. On operations with symmetric + // encryption KMS keys, an encryption context is optional, but it is strongly + // recommended. For more information, see Encryption context + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // in the Key Management Service Developer Guide. + EncryptionContext map[string]string + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. 
+ // For more information, see Grant token +// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) +// and Using a grant token +// (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) +// in the Key Management Service Developer Guide. + GrantTokens []string + + // Specifies the KMS key that KMS uses to decrypt the ciphertext. Enter a key ID of + // the KMS key that was used to encrypt the ciphertext. If you identify a different + // KMS key, the Decrypt operation throws an IncorrectKeyException. This parameter + // is required only when the ciphertext was encrypted under an asymmetric KMS key. + // If you used a symmetric encryption KMS key, KMS can get the KMS key from + // metadata that it adds to the symmetric ciphertext blob. However, specifying the + // KMS key is always recommended as a best practice. This practice ensures that you + // use the KMS key that you intend. To specify a KMS key, use its key ID, key ARN, + // alias name, or alias ARN. When using an alias name, prefix it with "alias/". To + // specify a KMS key in a different Amazon Web Services account, you must use the + // key ARN or alias ARN. For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key + // ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Alias name: alias/ExampleAlias + // + // * Alias ARN: + // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key + // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias + // ARN, use ListAliases. + KeyId *string + + noSmithyDocumentSerde +} + +type DecryptOutput struct { + + // The encryption algorithm that was used to decrypt the ciphertext. + EncryptionAlgorithm types.EncryptionAlgorithmSpec + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the KMS key that was used to decrypt the ciphertext. + KeyId *string + + // Decrypted plaintext data. When you use the HTTP API or the Amazon Web Services + // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. + Plaintext []byte + + // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDecryptMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDecrypt{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDecrypt{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDecryptValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDecrypt(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDecrypt(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "Decrypt", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go new file mode 100644 index 00000000000..1609069b962 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go @@ -0,0 +1,147 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the specified alias. Adding, deleting, or updating an alias can allow or +// deny permission to the KMS key. For details, see ABAC for KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key +// Management Service Developer Guide. Because an alias is not a property of a KMS +// key, you can delete and change the aliases of a KMS key without affecting the +// KMS key. Also, aliases do not appear in the response from the DescribeKey +// operation. To get the aliases of all KMS keys, use the ListAliases operation. +// Each KMS key can have multiple aliases. 
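A minimal Encrypt/Decrypt round trip against the surface above, assuming a symmetric encryption KMS key. The key ID and encryption context are placeholders, and the imports use the canonical module paths rather than the vendored github.com mirrors.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load config: %v", err)
	}
	client := kms.NewFromConfig(cfg)

	// Placeholder ID of a symmetric encryption KMS key.
	keyID := "1234abcd-12ab-34cd-56ef-1234567890ab"
	encCtx := map[string]string{"purpose": "example"}

	enc, err := client.Encrypt(ctx, &kms.EncryptInput{
		KeyId:             aws.String(keyID),
		Plaintext:         []byte("secret payload"),
		EncryptionContext: encCtx,
	})
	if err != nil {
		log.Fatalf("Encrypt: %v", err)
	}

	// KeyId is optional for symmetric ciphertext (KMS reads it from the blob's
	// metadata), but pinning it is the recommended practice; the encryption
	// context must be supplied again as an exact, case-sensitive match.
	dec, err := client.Decrypt(ctx, &kms.DecryptInput{
		CiphertextBlob:    enc.CiphertextBlob,
		KeyId:             aws.String(keyID),
		EncryptionContext: encCtx,
	})
	if err != nil {
		log.Fatalf("Decrypt: %v", err)
	}
	fmt.Printf("plaintext: %s\n", dec.Plaintext)
}
```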
To change the alias of a KMS key, use +// DeleteAlias to delete the current alias and CreateAlias to create a new alias. +// To associate an existing alias with a different KMS key, call UpdateAlias. +// Cross-account use: No. You cannot perform this operation on an alias in a +// different Amazon Web Services account. Required permissions +// +// * kms:DeleteAlias +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// on the alias (IAM policy). +// +// * kms:DeleteAlias +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// on the KMS key (key policy). +// +// For details, see Controlling access to aliases +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access) +// in the Key Management Service Developer Guide. Related operations: +// +// * +// CreateAlias +// +// * ListAliases +// +// * UpdateAlias +func (c *Client) DeleteAlias(ctx context.Context, params *DeleteAliasInput, optFns ...func(*Options)) (*DeleteAliasOutput, error) { + if params == nil { + params = &DeleteAliasInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteAlias", params, optFns, c.addOperationDeleteAliasMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteAliasOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteAliasInput struct { + + // The alias to be deleted. The alias name must begin with alias/ followed by the + // alias name, such as alias/ExampleAlias. + // + // This member is required. + AliasName *string + + noSmithyDocumentSerde +} + +type DeleteAliasOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteAliasMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteAlias{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteAlias{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteAliasValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteAlias(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if 
err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteAlias(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "DeleteAlias", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go new file mode 100644 index 00000000000..9bf7bcbce39 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go @@ -0,0 +1,164 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a custom key store +// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). +// This operation does not affect any backing elements of the custom key store. It +// does not delete the CloudHSM cluster that is associated with an CloudHSM key +// store, or affect any users or keys in the cluster. For an external key store, it +// does not affect the external key store proxy, external key manager, or any +// external keys. This operation is part of the custom key stores +// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// feature in KMS, which combines the convenience and extensive integration of KMS +// with the isolation and control of a key store that you own and manage. The +// custom key store that you delete cannot contain any KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms_keys). +// Before deleting the key store, verify that you will never need to use any of the +// KMS keys in the key store for any cryptographic operations +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations). +// Then, use ScheduleKeyDeletion to delete the KMS keys from the key store. After +// the required waiting period expires and all KMS keys are deleted from the custom +// key store, use DisconnectCustomKeyStore to disconnect the key store from KMS. +// Then, you can delete the custom key store. For keys in an CloudHSM key store, +// the ScheduleKeyDeletion operation makes a best effort to delete the key material +// from the associated cluster. However, you might need to manually delete the +// orphaned key material +// (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key) +// from the cluster and its backups. KMS never creates, manages, or deletes +// cryptographic keys in the external key manager associated with an external key +// store. You must manage them using your external key manager tools. Instead of +// deleting the custom key store, consider using the DisconnectCustomKeyStore +// operation to disconnect the custom key store from its backing key store. While +// the key store is disconnected, you cannot create or use the KMS keys in the key +// store. But, you do not need to delete KMS keys and you can reconnect a +// disconnected custom key store at any time. If the operation succeeds, it returns +// a JSON object with no properties. 
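A short sketch contrasting the two alias workflows the DeleteAlias doc comment above describes: UpdateAlias to re-point an alias at another key, versus the delete-then-create sequence to rename one. The alias names and key ID are placeholders, and the imports use the canonical module paths rather than the vendored github.com mirrors.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load config: %v", err)
	}
	client := kms.NewFromConfig(cfg)

	alias := "alias/ExampleAlias"                      // placeholder alias
	newKeyID := "1234abcd-12ab-34cd-56ef-1234567890ab" // placeholder key

	// Re-pointing an existing alias at another KMS key is a single call.
	if _, err := client.UpdateAlias(ctx, &kms.UpdateAliasInput{
		AliasName:   aws.String(alias),
		TargetKeyId: aws.String(newKeyID),
	}); err != nil {
		log.Fatalf("UpdateAlias: %v", err)
	}

	// Renaming an alias, by contrast, is the delete-then-create sequence; the
	// KMS key itself is unaffected, since an alias is not a property of the key.
	if _, err := client.DeleteAlias(ctx, &kms.DeleteAliasInput{
		AliasName: aws.String(alias),
	}); err != nil {
		log.Fatalf("DeleteAlias: %v", err)
	}
	if _, err := client.CreateAlias(ctx, &kms.CreateAliasInput{
		AliasName:   aws.String("alias/ExampleAliasRenamed"),
		TargetKeyId: aws.String(newKeyID),
	}); err != nil {
		log.Fatalf("CreateAlias: %v", err)
	}
}
```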
Cross-account use: No. You cannot perform this +// operation on a custom key store in a different Amazon Web Services account. +// Required permissions: kms:DeleteCustomKeyStore +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (IAM policy) Related operations: +// +// * ConnectCustomKeyStore +// +// * +// CreateCustomKeyStore +// +// * DescribeCustomKeyStores +// +// * DisconnectCustomKeyStore +// +// * +// UpdateCustomKeyStore +func (c *Client) DeleteCustomKeyStore(ctx context.Context, params *DeleteCustomKeyStoreInput, optFns ...func(*Options)) (*DeleteCustomKeyStoreOutput, error) { + if params == nil { + params = &DeleteCustomKeyStoreInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteCustomKeyStore", params, optFns, c.addOperationDeleteCustomKeyStoreMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteCustomKeyStoreOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteCustomKeyStoreInput struct { + + // Enter the ID of the custom key store you want to delete. To find the ID of a + // custom key store, use the DescribeCustomKeyStores operation. + // + // This member is required. + CustomKeyStoreId *string + + noSmithyDocumentSerde +} + +type DeleteCustomKeyStoreOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteCustomKeyStoreMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteCustomKeyStoreValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteCustomKeyStore(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteCustomKeyStore(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: 
ServiceID, + SigningName: "kms", + OperationName: "DeleteCustomKeyStore", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go new file mode 100644 index 00000000000..7f8839b963f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go @@ -0,0 +1,145 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes key material that you previously imported. This operation makes the +// specified KMS key unusable. For more information about importing key material +// into KMS, see Importing Key Material +// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) in +// the Key Management Service Developer Guide. When the specified KMS key is in the +// PendingDeletion state, this operation does not change the KMS key's state. +// Otherwise, it changes the KMS key's state to PendingImport. After you delete key +// material, you can use ImportKeyMaterial to reimport the same key material into +// the KMS key. The KMS key that you use for this operation must be in a compatible +// key state. For details, see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account. +// Required permissions: kms:DeleteImportedKeyMaterial +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * GetParametersForImport +// +// * ImportKeyMaterial +func (c *Client) DeleteImportedKeyMaterial(ctx context.Context, params *DeleteImportedKeyMaterialInput, optFns ...func(*Options)) (*DeleteImportedKeyMaterialOutput, error) { + if params == nil { + params = &DeleteImportedKeyMaterialInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteImportedKeyMaterial", params, optFns, c.addOperationDeleteImportedKeyMaterialMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteImportedKeyMaterialOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteImportedKeyMaterialInput struct { + + // Identifies the KMS key from which you are deleting imported key material. The + // Origin of the KMS key must be EXTERNAL. Specify the key ID or key ARN of the KMS + // key. For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type DeleteImportedKeyMaterialOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteImportedKeyMaterialMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteImportedKeyMaterial{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteImportedKeyMaterial{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteImportedKeyMaterialValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteImportedKeyMaterial(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteImportedKeyMaterial(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "DeleteImportedKeyMaterial", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go new file mode 100644 index 00000000000..3dfd2f9c163 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go @@ -0,0 +1,287 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets information about custom key stores +// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// in the account and Region. This operation is part of the custom key stores +// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// feature in KMS, which combines the convenience and extensive integration of KMS +// with the isolation and control of a key store that you own and manage. 
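+//
+// A hypothetical call sketch (the store name is a placeholder; client is assumed
+// to be a *Client from NewFromConfig):
+//
+//	out, err := client.DescribeCustomKeyStores(ctx, &kms.DescribeCustomKeyStoresInput{
+//		CustomKeyStoreName: aws.String("ExampleKeyStore"), // omit both filters to list all stores
+//	})
+//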
By +// default, this operation returns information about all custom key stores in the +// account and Region. To get only information about a particular custom key store, +// use either the CustomKeyStoreName or CustomKeyStoreId parameter (but not both). +// To determine whether the custom key store is connected to its CloudHSM cluster +// or external key store proxy, use the ConnectionState element in the response. If +// an attempt to connect the custom key store failed, the ConnectionState value is +// FAILED and the ConnectionErrorCode element in the response indicates the cause +// of the failure. For help interpreting the ConnectionErrorCode, see +// CustomKeyStoresListEntry. Custom key stores have a DISCONNECTED connection state +// if the key store has never been connected or you used the +// DisconnectCustomKeyStore operation to disconnect it. Otherwise, the connection +// state is CONNECTED. If your custom key store connection state is CONNECTED but +// you are having trouble using it, verify that the backing store is active and +// available. For an CloudHSM key store, verify that the associated CloudHSM +// cluster is active and contains the minimum number of HSMs required for the +// operation, if any. For an external key store, verify that the external key store +// proxy and its associated external key manager are reachable and enabled. For +// help repairing your CloudHSM key store, see the Troubleshooting CloudHSM key +// stores +// (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html). For +// help repairing your external key store, see the Troubleshooting external key +// stores +// (https://docs.aws.amazon.com/kms/latest/developerguide/xks-troubleshooting.html). +// Both topics are in the Key Management Service Developer Guide. Cross-account +// use: No. You cannot perform this operation on a custom key store in a different +// Amazon Web Services account. Required permissions: kms:DescribeCustomKeyStores +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (IAM policy) Related operations: +// +// * ConnectCustomKeyStore +// +// * +// CreateCustomKeyStore +// +// * DeleteCustomKeyStore +// +// * DisconnectCustomKeyStore +// +// * +// UpdateCustomKeyStore +func (c *Client) DescribeCustomKeyStores(ctx context.Context, params *DescribeCustomKeyStoresInput, optFns ...func(*Options)) (*DescribeCustomKeyStoresOutput, error) { + if params == nil { + params = &DescribeCustomKeyStoresInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeCustomKeyStores", params, optFns, c.addOperationDescribeCustomKeyStoresMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeCustomKeyStoresOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeCustomKeyStoresInput struct { + + // Gets only information about the specified custom key store. Enter the key store + // ID. By default, this operation gets information about all custom key stores in + // the account and Region. To limit the output to a particular custom key store, + // provide either the CustomKeyStoreId or CustomKeyStoreName parameter, but not + // both. + CustomKeyStoreId *string + + // Gets only information about the specified custom key store. Enter the friendly + // name of the custom key store. By default, this operation gets information about + // all custom key stores in the account and Region. 
To limit the output to a
+ // particular custom key store, provide either the CustomKeyStoreId or
+ // CustomKeyStoreName parameter, but not both.
+ CustomKeyStoreName *string
+
+ // Use this parameter to specify the maximum number of items to return. When this
+ // value is present, KMS does not return more than the specified number of items,
+ // but it might return fewer.
+ Limit *int32
+
+ // Use this parameter in a subsequent request after you receive a response with
+ // truncated results. Set it to the value of NextMarker from the truncated response
+ // you just received.
+ Marker *string
+
+ noSmithyDocumentSerde
+}
+
+type DescribeCustomKeyStoresOutput struct {
+
+ // Contains metadata about each custom key store.
+ CustomKeyStores []types.CustomKeyStoresListEntry
+
+ // When Truncated is true, this element is present and contains the value to use
+ // for the Marker parameter in a subsequent request.
+ NextMarker *string
+
+ // A flag that indicates whether there are more items in the list. When this value
+ // is true, the list in this response is truncated. To get more items, pass the
+ // value of the NextMarker element in this response to the Marker parameter in a
+ // subsequent request.
+ Truncated bool
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDescribeCustomKeyStoresMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeCustomKeyStores{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeCustomKeyStores{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeCustomKeyStores(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+// DescribeCustomKeyStoresAPIClient is a client that implements the
+// DescribeCustomKeyStores operation.
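+//
+// A paginator usage sketch (illustrative; assumes a configured *Client, which
+// satisfies this interface):
+//
+//	p := kms.NewDescribeCustomKeyStoresPaginator(client, &kms.DescribeCustomKeyStoresInput{})
+//	for p.HasMorePages() {
+//		page, err := p.NextPage(ctx)
+//		if err != nil {
+//			break // handle the error in real code
+//		}
+//		for _, ks := range page.CustomKeyStores {
+//			_ = ks // inspect each types.CustomKeyStoresListEntry
+//		}
+//	}
+//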
+type DescribeCustomKeyStoresAPIClient interface { + DescribeCustomKeyStores(context.Context, *DescribeCustomKeyStoresInput, ...func(*Options)) (*DescribeCustomKeyStoresOutput, error) +} + +var _ DescribeCustomKeyStoresAPIClient = (*Client)(nil) + +// DescribeCustomKeyStoresPaginatorOptions is the paginator options for +// DescribeCustomKeyStores +type DescribeCustomKeyStoresPaginatorOptions struct { + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// DescribeCustomKeyStoresPaginator is a paginator for DescribeCustomKeyStores +type DescribeCustomKeyStoresPaginator struct { + options DescribeCustomKeyStoresPaginatorOptions + client DescribeCustomKeyStoresAPIClient + params *DescribeCustomKeyStoresInput + nextToken *string + firstPage bool +} + +// NewDescribeCustomKeyStoresPaginator returns a new +// DescribeCustomKeyStoresPaginator +func NewDescribeCustomKeyStoresPaginator(client DescribeCustomKeyStoresAPIClient, params *DescribeCustomKeyStoresInput, optFns ...func(*DescribeCustomKeyStoresPaginatorOptions)) *DescribeCustomKeyStoresPaginator { + if params == nil { + params = &DescribeCustomKeyStoresInput{} + } + + options := DescribeCustomKeyStoresPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &DescribeCustomKeyStoresPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.Marker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *DescribeCustomKeyStoresPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next DescribeCustomKeyStores page. +func (p *DescribeCustomKeyStoresPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeCustomKeyStoresOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.Marker = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + result, err := p.client.DescribeCustomKeyStores(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextMarker + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opDescribeCustomKeyStores(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "DescribeCustomKeyStores", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go new file mode 100644 index 00000000000..09613a7b024 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go @@ -0,0 +1,211 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Provides detailed information about a KMS key. You can run DescribeKey on a +// customer managed key +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) +// or an Amazon Web Services managed key +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk). +// This detailed information includes the key ARN, creation date (and deletion +// date, if applicable), the key state, and the origin and expiration date (if any) +// of the key material. It includes fields, like KeySpec, that help you distinguish +// different types of KMS keys. It also displays the key usage (encryption, +// signing, or generating and verifying MACs) and the algorithms that the KMS key +// supports. For multi-Region keys, DescribeKey displays the primary key and all +// related replica keys. For KMS keys in CloudHSM key stores, it includes +// information about the key store, such as the key store ID and the CloudHSM +// cluster ID. For KMS keys in external key stores, it includes the custom key +// store ID and the ID of the external key. DescribeKey does not return the +// following information: +// +// * Aliases associated with the KMS key. To get this +// information, use ListAliases. +// +// * Whether automatic key rotation is enabled on +// the KMS key. To get this information, use GetKeyRotationStatus. Also, some key +// states prevent a KMS key from being automatically rotated. For details, see How +// Automatic Key Rotation Works +// (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotate-keys-how-it-works) +// in the Key Management Service Developer Guide. +// +// * Tags on the KMS key. To get +// this information, use ListResourceTags. +// +// * Key policies and grants on the KMS +// key. To get this information, use GetKeyPolicy and ListGrants. +// +// In general, +// DescribeKey is a non-mutating operation. It returns data about KMS keys, but +// doesn't change them. However, Amazon Web Services services use DescribeKey to +// create Amazon Web Services managed keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) +// from a predefined Amazon Web Services alias with no key ID. Cross-account use: +// Yes. To perform this operation with a KMS key in a different Amazon Web Services +// account, specify the key ARN or alias ARN in the value of the KeyId parameter. 
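+//
+// A minimal usage sketch (illustrative; the alias is a placeholder and client is
+// assumed to be a *Client from NewFromConfig):
+//
+//	out, err := client.DescribeKey(ctx, &kms.DescribeKeyInput{
+//		KeyId: aws.String("alias/ExampleAlias"), // placeholder alias
+//	})
+//	// On success, out.KeyMetadata carries the key ARN, state, spec, and usage.
+//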
+// Required permissions: kms:DescribeKey
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (key policy) Related operations:
+//
+// * GetKeyPolicy
+//
+// * GetKeyRotationStatus
+//
+// *
+// ListAliases
+//
+// * ListGrants
+//
+// * ListKeys
+//
+// * ListResourceTags
+//
+// * ListRetirableGrants
+func (c *Client) DescribeKey(ctx context.Context, params *DescribeKeyInput, optFns ...func(*Options)) (*DescribeKeyOutput, error) {
+ if params == nil {
+ params = &DescribeKeyInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DescribeKey", params, optFns, c.addOperationDescribeKeyMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DescribeKeyOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DescribeKeyInput struct {
+
+ // Describes the specified KMS key. If you specify a predefined Amazon Web Services
+ // alias (an Amazon Web Services alias with no key ID), KMS associates the alias
+ // with an Amazon Web Services managed key
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk)
+ // and returns its KeyId and Arn in the response. To specify a KMS key, use its key
+ // ID, key ARN, alias name, or alias ARN. When using an alias name, prefix it with
+ // "alias/". To specify a KMS key in a different Amazon Web Services account, you
+ // must use the key ARN or alias ARN. For example:
+
+ // * Key ID:
+ // 1234abcd-12ab-34cd-56ef-1234567890ab
+
+ // * Key ARN:
+ // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+
+ // *
+ // Alias name: alias/ExampleAlias
+
+ // * Alias ARN:
+ // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias
+
+ // To get the key ID and key
+ // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias
+ // ARN, use ListAliases.
+
+ // This member is required.
+ KeyId *string
+
+ // A list of grant tokens. Use a grant token when your permission to call this
+ // operation comes from a new grant that has not yet achieved eventual consistency.
+ // For more information, see Grant token
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token)
+ // and Using a grant token
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token)
+ // in the Key Management Service Developer Guide.
+ GrantTokens []string
+
+ noSmithyDocumentSerde
+}
+
+type DescribeKeyOutput struct {
+
+ // Metadata associated with the key.
+ KeyMetadata *types.KeyMetadata
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeKeyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeKey{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeKey{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeKeyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeKey(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeKey(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "DescribeKey", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go new file mode 100644 index 00000000000..066f679ef09 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go @@ -0,0 +1,138 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Sets the state of a KMS key to disabled. This change temporarily prevents use of +// the KMS key for cryptographic operations +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations). +// For more information about how key state affects the use of a KMS key, see Key +// states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide . The KMS key that you use for this +// operation must be in a compatible key state. 
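+//
+// A minimal usage sketch (illustrative; the key ID is a placeholder and client is
+// assumed to be a *Client from NewFromConfig):
+//
+//	_, err := client.DisableKey(ctx, &kms.DisableKeyInput{
+//		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder key ID
+//	})
+//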
For details, see Key states of KMS +// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in +// the Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account. +// Required permissions: kms:DisableKey +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: EnableKey +func (c *Client) DisableKey(ctx context.Context, params *DisableKeyInput, optFns ...func(*Options)) (*DisableKeyOutput, error) { + if params == nil { + params = &DisableKeyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DisableKey", params, optFns, c.addOperationDisableKeyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DisableKeyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DisableKeyInput struct { + + // Identifies the KMS key to disable. Specify the key ID or key ARN of the KMS key. + // For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type DisableKeyOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDisableKeyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDisableKey{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDisableKey{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDisableKeyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisableKey(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDisableKey(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: 
region,
+ ServiceID: ServiceID,
+ SigningName: "kms",
+ OperationName: "DisableKey",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go
new file mode 100644
index 00000000000..003e50230a1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go
@@ -0,0 +1,167 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package kms
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Disables automatic rotation of the key material
+// (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html) of the
+// specified symmetric encryption KMS key. Automatic key rotation is supported only
+// on symmetric encryption KMS keys. You cannot enable automatic rotation of
+// asymmetric KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
+// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html),
+// KMS keys with imported key material
+// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), or
+// KMS keys in a custom key store
+// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html).
+// To enable or disable automatic rotation of a set of related multi-Region keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
+// set the property on the primary key. You can enable (EnableKeyRotation) and
+// disable automatic rotation of the key material in customer managed KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk).
+// Key material rotation of Amazon Web Services managed KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk)
+// is not configurable. KMS always rotates the key material every year.
+// Rotation of Amazon Web Services owned KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk)
+// varies. In May 2022, KMS changed the rotation schedule for Amazon Web Services
+// managed keys from every three years to every year. For details, see
+// EnableKeyRotation. The KMS key that you use for this operation must be in a
+// compatible key state. For details, see Key states of KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the
+// Key Management Service Developer Guide. Cross-account use: No. You cannot
+// perform this operation on a KMS key in a different Amazon Web Services account.
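+//
+// A minimal usage sketch (illustrative; the key ID is a placeholder and client is
+// assumed to be a *Client from NewFromConfig):
+//
+//	_, err := client.DisableKeyRotation(ctx, &kms.DisableKeyRotationInput{
+//		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // symmetric encryption key only
+//	})
+//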
+// Required permissions: kms:DisableKeyRotation +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * EnableKeyRotation +// +// * GetKeyRotationStatus +func (c *Client) DisableKeyRotation(ctx context.Context, params *DisableKeyRotationInput, optFns ...func(*Options)) (*DisableKeyRotationOutput, error) { + if params == nil { + params = &DisableKeyRotationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DisableKeyRotation", params, optFns, c.addOperationDisableKeyRotationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DisableKeyRotationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DisableKeyRotationInput struct { + + // Identifies a symmetric encryption KMS key. You cannot enable or disable + // automatic rotation of asymmetric KMS keys + // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html#asymmetric-cmks), + // HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html), + // KMS keys with imported key material + // (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), or + // KMS keys in a custom key store + // (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). + // Specify the key ID or key ARN of the KMS key. For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type DisableKeyRotationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDisableKeyRotationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDisableKeyRotation{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDisableKeyRotation{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDisableKeyRotationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisableKeyRotation(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDisableKeyRotation(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "DisableKeyRotation", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go new file mode 100644 index 00000000000..c4f2441be16 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go @@ -0,0 +1,153 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Disconnects the custom key store +// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// from its backing key store. This operation disconnects an CloudHSM key store +// from its associated CloudHSM cluster or disconnects an external key store from +// the external key store proxy that communicates with your external key manager. 
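+//
+// A minimal usage sketch (illustrative; the store ID is a placeholder and client
+// is assumed to be a *Client from NewFromConfig):
+//
+//	_, err := client.DisconnectCustomKeyStore(ctx, &kms.DisconnectCustomKeyStoreInput{
+//		CustomKeyStoreId: aws.String("cks-1234567890abcdef0"), // placeholder ID
+//	})
+//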
+// This operation is part of the custom key stores +// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// feature in KMS, which combines the convenience and extensive integration of KMS +// with the isolation and control of a key store that you own and manage. While a +// custom key store is disconnected, you can manage the custom key store and its +// KMS keys, but you cannot create or use its KMS keys. You can reconnect the +// custom key store at any time. While a custom key store is disconnected, all +// attempts to create KMS keys in the custom key store or to use existing KMS keys +// in cryptographic operations +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) +// will fail. This action can prevent users from storing and accessing sensitive +// data. When you disconnect a custom key store, its ConnectionState changes to +// Disconnected. To find the connection state of a custom key store, use the +// DescribeCustomKeyStores operation. To reconnect a custom key store, use the +// ConnectCustomKeyStore operation. If the operation succeeds, it returns a JSON +// object with no properties. Cross-account use: No. You cannot perform this +// operation on a custom key store in a different Amazon Web Services account. +// Required permissions: kms:DisconnectCustomKeyStore +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (IAM policy) Related operations: +// +// * ConnectCustomKeyStore +// +// * +// CreateCustomKeyStore +// +// * DeleteCustomKeyStore +// +// * DescribeCustomKeyStores +// +// * +// UpdateCustomKeyStore +func (c *Client) DisconnectCustomKeyStore(ctx context.Context, params *DisconnectCustomKeyStoreInput, optFns ...func(*Options)) (*DisconnectCustomKeyStoreOutput, error) { + if params == nil { + params = &DisconnectCustomKeyStoreInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DisconnectCustomKeyStore", params, optFns, c.addOperationDisconnectCustomKeyStoreMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DisconnectCustomKeyStoreOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DisconnectCustomKeyStoreInput struct { + + // Enter the ID of the custom key store you want to disconnect. To find the ID of a + // custom key store, use the DescribeCustomKeyStores operation. + // + // This member is required. + CustomKeyStoreId *string + + noSmithyDocumentSerde +} + +type DisconnectCustomKeyStoreOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDisconnectCustomKeyStoreMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDisconnectCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDisconnectCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDisconnectCustomKeyStoreValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisconnectCustomKeyStore(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDisconnectCustomKeyStore(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "DisconnectCustomKeyStore", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go new file mode 100644 index 00000000000..f44645321a4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go @@ -0,0 +1,135 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Sets the key state of a KMS key to enabled. This allows you to use the KMS key +// for cryptographic operations +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations). +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account. 
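+//
+// A minimal usage sketch (illustrative; the key ID is a placeholder and client is
+// assumed to be a *Client from NewFromConfig):
+//
+//	_, err := client.EnableKey(ctx, &kms.EnableKeyInput{
+//		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder key ID
+//	})
+//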
+// Required permissions: kms:EnableKey +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: DisableKey +func (c *Client) EnableKey(ctx context.Context, params *EnableKeyInput, optFns ...func(*Options)) (*EnableKeyOutput, error) { + if params == nil { + params = &EnableKeyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "EnableKey", params, optFns, c.addOperationEnableKeyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*EnableKeyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type EnableKeyInput struct { + + // Identifies the KMS key to enable. Specify the key ID or key ARN of the KMS key. + // For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type EnableKeyOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationEnableKeyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpEnableKey{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpEnableKey{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpEnableKeyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEnableKey(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opEnableKey(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "EnableKey", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go new file mode 100644 
index 00000000000..0a7b6bbeb76
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go
@@ -0,0 +1,179 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package kms
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Enables automatic rotation of the key material
+// (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html) of the
+// specified symmetric encryption KMS key. When you enable automatic rotation of
+// a customer managed KMS key
+// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk),
+// KMS rotates the key material of the KMS key one year (approximately 365 days)
+// from the enable date and every year thereafter. You can monitor rotation of the
+// key material for your KMS keys in CloudTrail and Amazon CloudWatch. To disable
+// rotation of the key material in a customer managed KMS key, use the
+// DisableKeyRotation operation. Automatic key rotation is supported only on
+// symmetric encryption KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#symmetric-cmks).
+// You cannot enable automatic rotation of asymmetric KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
+// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html),
+// KMS keys with imported key material
+// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), or
+// KMS keys in a custom key store
+// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html).
+// To enable or disable automatic rotation of a set of related multi-Region keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
+// set the property on the primary key. You cannot enable or disable automatic
+// rotation of Amazon Web Services managed KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk).
+// KMS always rotates the key material of Amazon Web Services managed keys every
+// year. Rotation of Amazon Web Services owned KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk)
+// varies. In May 2022, KMS changed the rotation schedule for Amazon Web Services
+// managed keys from every three years (approximately 1,095 days) to every year
+// (approximately 365 days). New Amazon Web Services managed keys are automatically
+// rotated one year after they are created, and approximately every year
+// thereafter. Existing Amazon Web Services managed keys are automatically rotated
+// one year after their most recent rotation, and every year thereafter. The KMS
+// key that you use for this operation must be in a compatible key state. For
+// details, see Key states of KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the
+// Key Management Service Developer Guide. Cross-account use: No. You cannot
+// perform this operation on a KMS key in a different Amazon Web Services account.
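+//
+// A minimal usage sketch (illustrative; the key ID is a placeholder and client is
+// assumed to be a *Client from NewFromConfig):
+//
+//	_, err := client.EnableKeyRotation(ctx, &kms.EnableKeyRotationInput{
+//		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // symmetric encryption key only
+//	})
+//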
+// Required permissions: kms:EnableKeyRotation +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * DisableKeyRotation +// +// * GetKeyRotationStatus +func (c *Client) EnableKeyRotation(ctx context.Context, params *EnableKeyRotationInput, optFns ...func(*Options)) (*EnableKeyRotationOutput, error) { + if params == nil { + params = &EnableKeyRotationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "EnableKeyRotation", params, optFns, c.addOperationEnableKeyRotationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*EnableKeyRotationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type EnableKeyRotationInput struct { + + // Identifies a symmetric encryption KMS key. You cannot enable automatic rotation + // of asymmetric KMS keys + // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html), + // HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html), + // KMS keys with imported key material + // (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), or + // KMS keys in a custom key store + // (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). + // To enable or disable automatic rotation of a set of related multi-Region keys + // (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate), + // set the property on the primary key. Specify the key ID or key ARN of the KMS + // key. For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type EnableKeyRotationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationEnableKeyRotationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpEnableKeyRotation{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpEnableKeyRotation{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpEnableKeyRotationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEnableKeyRotation(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opEnableKeyRotation(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "EnableKeyRotation", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go new file mode 100644 index 00000000000..e5faf29e331 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go @@ -0,0 +1,260 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Encrypts plaintext of up to 4,096 bytes using a KMS key. You can use a symmetric +// or asymmetric KMS key with a KeyUsage of ENCRYPT_DECRYPT. You can use this +// operation to encrypt small amounts of arbitrary data, such as a personal +// identifier or database password, or other sensitive information. You don't need +// to use the Encrypt operation to encrypt a data key. The GenerateDataKey and +// GenerateDataKeyPair operations return a plaintext data key and an encrypted copy +// of that data key. 
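+//
+// A minimal usage sketch (illustrative; the key ID, plaintext, and encryption
+// context are placeholders; client is assumed to be a *Client from NewFromConfig):
+//
+//	out, err := client.Encrypt(ctx, &kms.EncryptInput{
+//		KeyId:             aws.String("alias/ExampleAlias"),
+//		Plaintext:         []byte("sensitive value"),
+//		EncryptionContext: map[string]string{"purpose": "example"},
+//	})
+//	// On success, out.CiphertextBlob holds the encrypted bytes.
+//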
If you use a symmetric encryption KMS key, you can use an +// encryption context to add additional security to your encryption operation. If +// you specify an EncryptionContext when encrypting data, you must specify the same +// encryption context (a case-sensitive exact match) when decrypting the data. +// Otherwise, the request to decrypt fails with an InvalidCiphertextException. For +// more information, see Encryption Context +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) +// in the Key Management Service Developer Guide. If you specify an asymmetric KMS +// key, you must also specify the encryption algorithm. The algorithm must be +// compatible with the KMS key spec. When you use an asymmetric KMS key to encrypt +// or reencrypt data, be sure to record the KMS key and encryption algorithm that +// you choose. You will be required to provide the same KMS key and encryption +// algorithm when you decrypt the data. If the KMS key and algorithm do not match +// the values used to encrypt the data, the decrypt operation fails. You are not +// required to supply the key ID and encryption algorithm when you decrypt with +// symmetric encryption KMS keys because KMS stores this information in the +// ciphertext blob. KMS cannot store metadata in ciphertext generated with +// asymmetric keys. The standard format for asymmetric key ciphertext does not +// include configurable fields. The maximum size of the data that you can encrypt +// varies with the type of KMS key and the encryption algorithm that you choose. +// +// * +// Symmetric encryption KMS keys +// +// * SYMMETRIC_DEFAULT: 4096 bytes +// +// * RSA_2048 +// +// * +// RSAES_OAEP_SHA_1: 214 bytes +// +// * RSAES_OAEP_SHA_256: 190 bytes +// +// * RSA_3072 +// +// * +// RSAES_OAEP_SHA_1: 342 bytes +// +// * RSAES_OAEP_SHA_256: 318 bytes +// +// * RSA_4096 +// +// * +// RSAES_OAEP_SHA_1: 470 bytes +// +// * RSAES_OAEP_SHA_256: 446 bytes +// +// * SM2PKE: 1024 +// bytes (China Regions only) +// +// The KMS key that you use for this operation must be +// in a compatible key state. For details, see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: Yes. To perform this +// operation with a KMS key in a different Amazon Web Services account, specify the +// key ARN or alias ARN in the value of the KeyId parameter. Required permissions: +// kms:Encrypt +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * Decrypt +// +// * GenerateDataKey +// +// * +// GenerateDataKeyPair +func (c *Client) Encrypt(ctx context.Context, params *EncryptInput, optFns ...func(*Options)) (*EncryptOutput, error) { + if params == nil { + params = &EncryptInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "Encrypt", params, optFns, c.addOperationEncryptMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*EncryptOutput) + out.ResultMetadata = metadata + return out, nil +} + +type EncryptInput struct { + + // Identifies the KMS key to use in the encryption operation. The KMS key must have + // a KeyUsage of ENCRYPT_DECRYPT. To find the KeyUsage of a KMS key, use the + // DescribeKey operation. To specify a KMS key, use its key ID, key ARN, alias + // name, or alias ARN. When using an alias name, prefix it with "alias/". 
To + // specify a KMS key in a different Amazon Web Services account, you must use the + // key ARN or alias ARN. For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Alias name: alias/ExampleAlias + // + // * Alias ARN: + // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key + // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias + // ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // Data to be encrypted. + // + // This member is required. + Plaintext []byte + + // Specifies the encryption algorithm that KMS will use to encrypt the plaintext + // message. The algorithm must be compatible with the KMS key that you specify. + // This parameter is required only for asymmetric KMS keys. The default value, + // SYMMETRIC_DEFAULT, is the algorithm used for symmetric encryption KMS keys. If + // you are using an asymmetric KMS key, we recommend RSAES_OAEP_SHA_256. The SM2PKE + // algorithm is only available in China Regions. + EncryptionAlgorithm types.EncryptionAlgorithmSpec + + // Specifies the encryption context that will be used to encrypt the data. An + // encryption context is valid only for cryptographic operations + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // with a symmetric encryption KMS key. The standard asymmetric encryption + // algorithms and HMAC algorithms that KMS uses do not support an encryption + // context. An encryption context is a collection of non-secret key-value pairs + // that represent additional authenticated data. When you use an encryption context + // to encrypt data, you must specify the same (an exact case-sensitive match) + // encryption context to decrypt the data. An encryption context is supported only + // on operations with symmetric encryption KMS keys. On operations with symmetric + // encryption KMS keys, an encryption context is optional, but it is strongly + // recommended. For more information, see Encryption context + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // in the Key Management Service Developer Guide. + EncryptionContext map[string]string + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. + // For more information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantTokens []string + + noSmithyDocumentSerde +} + +type EncryptOutput struct { + + // The encrypted plaintext. When you use the HTTP API or the Amazon Web Services + // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. + CiphertextBlob []byte + + // The encryption algorithm that was used to encrypt the plaintext. + EncryptionAlgorithm types.EncryptionAlgorithmSpec + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the KMS key that was used to encrypt the plaintext. + KeyId *string + + // Metadata pertaining to the operation's result. 
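+
+	// Illustrative usage sketch (editorial addition, not part of the generated
+	// SDK file): encrypting a small secret, assuming ctx is a context.Context,
+	// client is a configured *kms.Client, and "alias/ExampleAlias" is the
+	// placeholder alias from the docs above.
+	//
+	//	out, err := client.Encrypt(ctx, &kms.EncryptInput{
+	//		KeyId:     aws.String("alias/ExampleAlias"),
+	//		Plaintext: []byte("database password"),
+	//	})
+	//	if err != nil {
+	//		return err
+	//	}
+	//	// Persist out.CiphertextBlob. For symmetric encryption KMS keys,
+	//	// Decrypt needs only the blob; the key ID and algorithm travel in
+	//	// the ciphertext metadata.
+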
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationEncryptMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpEncrypt{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpEncrypt{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpEncryptValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEncrypt(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opEncrypt(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "Encrypt", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go new file mode 100644 index 00000000000..073edf96e1d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go @@ -0,0 +1,266 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a unique symmetric data key for use outside of KMS. This operation +// returns a plaintext copy of the data key and a copy that is encrypted under a +// symmetric encryption KMS key that you specify. The bytes in the plaintext key +// are random; they are not related to the caller or the KMS key. You can use the +// plaintext key to encrypt your data outside of KMS and store the encrypted data +// key with the encrypted data. To generate a data key, specify the symmetric +// encryption KMS key that will be used to encrypt the data key. You cannot use an +// asymmetric KMS key to encrypt data keys. 
To get the type of your KMS key, use +// the DescribeKey operation. You must also specify the length of the data key. Use +// either the KeySpec or NumberOfBytes parameters (but not both). For 128-bit and +// 256-bit data keys, use the KeySpec parameter. To generate an SM4 data key (China +// Regions only), specify a KeySpec value of AES_128 or NumberOfBytes value of 128. +// The symmetric encryption key used in China Regions to encrypt your data key is +// an SM4 encryption key. To get only an encrypted copy of the data key, use +// GenerateDataKeyWithoutPlaintext. To generate an asymmetric data key pair, use +// the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operation. To get +// a cryptographically secure random byte string, use GenerateRandom. You can use +// an optional encryption context to add additional security to the encryption +// operation. If you specify an EncryptionContext, you must specify the same +// encryption context (a case-sensitive exact match) when decrypting the encrypted +// data key. Otherwise, the request to decrypt fails with an +// InvalidCiphertextException. For more information, see Encryption Context +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) +// in the Key Management Service Developer Guide. Applications in Amazon Web +// Services Nitro Enclaves can call this operation by using the Amazon Web Services +// Nitro Enclaves Development Kit +// (https://github.com/aws/aws-nitro-enclaves-sdk-c). For information about the +// supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) +// in the Key Management Service Developer Guide. The KMS key that you use for this +// operation must be in a compatible key state. For details, see Key states of KMS +// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in +// the Key Management Service Developer Guide. How to use your data key We +// recommend that you use the following pattern to encrypt data locally in your +// application. You can write your own code or use a client-side encryption +// library, such as the Amazon Web Services Encryption SDK +// (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/), the Amazon +// DynamoDB Encryption Client +// (https://docs.aws.amazon.com/dynamodb-encryption-client/latest/devguide/), or +// Amazon S3 client-side encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html) +// to do these tasks for you. To encrypt data outside of KMS: +// +// * Use the +// GenerateDataKey operation to get a data key. +// +// * Use the plaintext data key (in +// the Plaintext field of the response) to encrypt your data outside of KMS. Then +// erase the plaintext data key from memory. +// +// * Store the encrypted data key (in +// the CiphertextBlob field of the response) with the encrypted data. +// +// To decrypt +// data outside of KMS: +// +// * Use the Decrypt operation to decrypt the encrypted data +// key. The operation returns a plaintext copy of the data key. +// +// * Use the +// plaintext data key to decrypt data outside of KMS, then erase the plaintext data +// key from memory. +// +// Cross-account use: Yes. To perform this operation with a KMS +// key in a different Amazon Web Services account, specify the key ARN or alias ARN +// in the value of the KeyId parameter. 
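+// An illustrative sketch of that pattern (editorial addition, not part of the
+// generated SDK file; assumes ctx, a configured *kms.Client named client, the
+// placeholder alias from these docs, and data []byte to protect):
+//
+//	dk, err := client.GenerateDataKey(ctx, &kms.GenerateDataKeyInput{
+//		KeyId:   aws.String("alias/ExampleAlias"),
+//		KeySpec: types.DataKeySpecAes256, // 256-bit data key
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	block, _ := aes.NewCipher(dk.Plaintext) // crypto/aes
+//	gcm, _ := cipher.NewGCM(block)          // crypto/cipher
+//	nonce := make([]byte, gcm.NonceSize())
+//	_, _ = rand.Read(nonce)                 // crypto/rand
+//	sealed := gcm.Seal(nonce, nonce, data, nil)
+//	// Store sealed together with dk.CiphertextBlob, then erase dk.Plaintext.
+//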
Required permissions: kms:GenerateDataKey +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * Decrypt +// +// * Encrypt +// +// * GenerateDataKeyPair +// +// * +// GenerateDataKeyPairWithoutPlaintext +// +// * GenerateDataKeyWithoutPlaintext +func (c *Client) GenerateDataKey(ctx context.Context, params *GenerateDataKeyInput, optFns ...func(*Options)) (*GenerateDataKeyOutput, error) { + if params == nil { + params = &GenerateDataKeyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GenerateDataKey", params, optFns, c.addOperationGenerateDataKeyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GenerateDataKeyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GenerateDataKeyInput struct { + + // Specifies the symmetric encryption KMS key that encrypts the data key. You + // cannot specify an asymmetric KMS key or a KMS key in a custom key store. To get + // the type and origin of your KMS key, use the DescribeKey operation. To specify a + // KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias + // name, prefix it with "alias/". To specify a KMS key in a different Amazon Web + // Services account, you must use the key ARN or alias ARN. For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Alias name: alias/ExampleAlias + // + // * Alias ARN: + // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key + // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias + // ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // Specifies the encryption context that will be used when encrypting the data key. + // An encryption context is a collection of non-secret key-value pairs that + // represent additional authenticated data. When you use an encryption context to + // encrypt data, you must specify the same (an exact case-sensitive match) + // encryption context to decrypt the data. An encryption context is supported only + // on operations with symmetric encryption KMS keys. On operations with symmetric + // encryption KMS keys, an encryption context is optional, but it is strongly + // recommended. For more information, see Encryption context + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // in the Key Management Service Developer Guide. + EncryptionContext map[string]string + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. + // For more information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantTokens []string + + // Specifies the length of the data key. Use AES_128 to generate a 128-bit + // symmetric key, or AES_256 to generate a 256-bit symmetric key. You must specify + // either the KeySpec or the NumberOfBytes parameter (but not both) in every + // GenerateDataKey request. + KeySpec types.DataKeySpec + + // Specifies the length of the data key in bytes. 
For example, use the value 64 to + // generate a 512-bit data key (64 bytes is 512 bits). For 128-bit (16-byte) and + // 256-bit (32-byte) data keys, use the KeySpec parameter. You must specify either + // the KeySpec or the NumberOfBytes parameter (but not both) in every + // GenerateDataKey request. + NumberOfBytes *int32 + + noSmithyDocumentSerde +} + +type GenerateDataKeyOutput struct { + + // The encrypted copy of the data key. When you use the HTTP API or the Amazon Web + // Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. + CiphertextBlob []byte + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the KMS key that encrypted the data key. + KeyId *string + + // The plaintext data key. When you use the HTTP API or the Amazon Web Services + // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. Use this + // data key to encrypt your data outside of KMS. Then, remove it from memory as + // soon as possible. + Plaintext []byte + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGenerateDataKeyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGenerateDataKey{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGenerateDataKey{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGenerateDataKeyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateDataKey(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGenerateDataKey(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "GenerateDataKey", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go new 
file mode 100644 index 00000000000..f67b704cc0c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go @@ -0,0 +1,248 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a unique asymmetric data key pair for use outside of KMS. This operation +// returns a plaintext public key, a plaintext private key, and a copy of the +// private key that is encrypted under the symmetric encryption KMS key you +// specify. You can use the data key pair to perform asymmetric cryptography and +// implement digital signatures outside of KMS. The bytes in the keys are random; +// they are not related to the caller or to the KMS key that is used to encrypt the +// private key. You can use the public key that GenerateDataKeyPair returns to +// encrypt data or verify a signature outside of KMS. Then, store the encrypted +// private key with the data. When you are ready to decrypt data or sign a message, +// you can use the Decrypt operation to decrypt the encrypted private key. To +// generate a data key pair, you must specify a symmetric encryption KMS key to +// encrypt the private key in a data key pair. You cannot use an asymmetric KMS key +// or a KMS key in a custom key store. To get the type and origin of your KMS key, +// use the DescribeKey operation. Use the KeyPairSpec parameter to choose an RSA or +// Elliptic Curve (ECC) data key pair. In China Regions, you can also choose an SM2 +// data key pair. KMS recommends that you use ECC key pairs for signing, and use +// RSA and SM2 key pairs for either encryption or signing, but not both. However, +// KMS cannot enforce any restrictions on the use of data key pairs outside of KMS. +// If you are using the data key pair to encrypt data, or for any operation where +// you don't immediately need a private key, consider using the +// GenerateDataKeyPairWithoutPlaintext operation. +// GenerateDataKeyPairWithoutPlaintext returns a plaintext public key and an +// encrypted private key, but omits the plaintext private key that you need only to +// decrypt ciphertext or sign a message. Later, when you need to decrypt the data +// or sign a message, use the Decrypt operation to decrypt the encrypted private +// key in the data key pair. GenerateDataKeyPair returns a unique data key pair for +// each request. The bytes in the keys are random; they are not related to the +// caller or the KMS key that is used to encrypt the private key. The public key is +// a DER-encoded X.509 SubjectPublicKeyInfo, as specified in RFC 5280 +// (https://tools.ietf.org/html/rfc5280). The private key is a DER-encoded PKCS8 +// PrivateKeyInfo, as specified in RFC 5958 (https://tools.ietf.org/html/rfc5958). +// You can use an optional encryption context to add additional security to the +// encryption operation. If you specify an EncryptionContext, you must specify the +// same encryption context (a case-sensitive exact match) when decrypting the +// encrypted data key. Otherwise, the request to decrypt fails with an +// InvalidCiphertextException.
For more information, see Encryption Context +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) +// in the Key Management Service Developer Guide. The KMS key that you use for this +// operation must be in a compatible key state. For details, see Key states of KMS +// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in +// the Key Management Service Developer Guide. Cross-account use: Yes. To perform +// this operation with a KMS key in a different Amazon Web Services account, +// specify the key ARN or alias ARN in the value of the KeyId parameter. Required +// permissions: kms:GenerateDataKeyPair +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * Decrypt +// +// * Encrypt +// +// * GenerateDataKey +// +// * +// GenerateDataKeyPairWithoutPlaintext +// +// * GenerateDataKeyWithoutPlaintext +func (c *Client) GenerateDataKeyPair(ctx context.Context, params *GenerateDataKeyPairInput, optFns ...func(*Options)) (*GenerateDataKeyPairOutput, error) { + if params == nil { + params = &GenerateDataKeyPairInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GenerateDataKeyPair", params, optFns, c.addOperationGenerateDataKeyPairMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GenerateDataKeyPairOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GenerateDataKeyPairInput struct { + + // Specifies the symmetric encryption KMS key that encrypts the private key in the + // data key pair. You cannot specify an asymmetric KMS key or a KMS key in a custom + // key store. To get the type and origin of your KMS key, use the DescribeKey + // operation. To specify a KMS key, use its key ID, key ARN, alias name, or alias + // ARN. When using an alias name, prefix it with "alias/". To specify a KMS key in + // a different Amazon Web Services account, you must use the key ARN or alias ARN. + // For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Alias name: alias/ExampleAlias + // + // * Alias ARN: + // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key + // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias + // ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // Determines the type of data key pair that is generated. The KMS rule that + // restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or + // to sign and verify (but not both), and the rule that permits you to use ECC KMS + // keys only to sign and verify, are not effective on data key pairs, which are + // used outside of KMS. The SM2 key spec is only available in China Regions. + // + // This member is required. + KeyPairSpec types.DataKeyPairSpec + + // Specifies the encryption context that will be used when encrypting the private + // key in the data key pair. An encryption context is a collection of non-secret + // key-value pairs that represent additional authenticated data. When you use an + // encryption context to encrypt data, you must specify the same (an exact + // case-sensitive match) encryption context to decrypt the data. An encryption + // context is supported only on operations with symmetric encryption KMS keys. 
On + // operations with symmetric encryption KMS keys, an encryption context is + // optional, but it is strongly recommended. For more information, see Encryption + // context + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // in the Key Management Service Developer Guide. + EncryptionContext map[string]string + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. + // For more information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantTokens []string + + noSmithyDocumentSerde +} + +type GenerateDataKeyPairOutput struct { + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the KMS key that encrypted the private key. + KeyId *string + + // The type of data key pair that was generated. + KeyPairSpec types.DataKeyPairSpec + + // The encrypted copy of the private key. When you use the HTTP API or the Amazon + // Web Services CLI, the value is Base64-encoded. Otherwise, it is not + // Base64-encoded. + PrivateKeyCiphertextBlob []byte + + // The plaintext copy of the private key. When you use the HTTP API or the Amazon + // Web Services CLI, the value is Base64-encoded. Otherwise, it is not + // Base64-encoded. + PrivateKeyPlaintext []byte + + // The public key (in plaintext). When you use the HTTP API or the Amazon Web + // Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. + PublicKey []byte + + // Metadata pertaining to the operation's result. 
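+
+	// Illustrative usage sketch (editorial addition, not part of the generated
+	// SDK file): requesting an ECC P-256 data key pair, assuming ctx, a
+	// configured *kms.Client named client, and the placeholder alias from
+	// these docs.
+	//
+	//	kp, err := client.GenerateDataKeyPair(ctx, &kms.GenerateDataKeyPairInput{
+	//		KeyId:       aws.String("alias/ExampleAlias"),
+	//		KeyPairSpec: types.DataKeyPairSpecEccNistP256,
+	//	})
+	//	if err != nil {
+	//		return err
+	//	}
+	//	pub, _ := x509.ParsePKIXPublicKey(kp.PublicKey)              // RFC 5280 SPKI
+	//	priv, _ := x509.ParsePKCS8PrivateKey(kp.PrivateKeyPlaintext) // RFC 5958 PKCS #8
+	//	// Use pub and priv, then erase kp.PrivateKeyPlaintext from memory.
+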
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGenerateDataKeyPairMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGenerateDataKeyPair{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGenerateDataKeyPair{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGenerateDataKeyPairValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateDataKeyPair(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGenerateDataKeyPair(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "GenerateDataKeyPair", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go new file mode 100644 index 00000000000..dc68638f205 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go @@ -0,0 +1,235 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a unique asymmetric data key pair for use outside of KMS. This operation +// returns a plaintext public key and a copy of the private key that is encrypted +// under the symmetric encryption KMS key you specify. Unlike GenerateDataKeyPair, +// this operation does not return a plaintext private key. The bytes in the keys +// are random; they are not related to the caller or to the KMS key that is used to +// encrypt the private key. 
You can use the public key that +// GenerateDataKeyPairWithoutPlaintext returns to encrypt data or verify a +// signature outside of KMS. Then, store the encrypted private key with the data. +// When you are ready to decrypt data or sign a message, you can use the Decrypt +// operation to decrypt the encrypted private key. To generate a data key pair, you +// must specify a symmetric encryption KMS key to encrypt the private key in a data +// key pair. You cannot use an asymmetric KMS key or a KMS key in a custom key +// store. To get the type and origin of your KMS key, use the DescribeKey +// operation. Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve +// (ECC) data key pair. In China Regions, you can also choose an SM2 data key pair. +// KMS recommends that you use ECC key pairs for signing, and use RSA and SM2 key +// pairs for either encryption or signing, but not both. However, KMS cannot +// enforce any restrictions on the use of data key pairs outside of KMS. +// GenerateDataKeyPairWithoutPlaintext returns a unique data key pair for each +// request. The bytes in the key are not related to the caller or KMS key that is +// used to encrypt the private key. The public key is a DER-encoded X.509 +// SubjectPublicKeyInfo, as specified in RFC 5280 +// (https://tools.ietf.org/html/rfc5280). You can use an optional encryption +// context to add additional security to the encryption operation. If you specify +// an EncryptionContext, you must specify the same encryption context (a +// case-sensitive exact match) when decrypting the encrypted data key. Otherwise, +// the request to decrypt fails with an InvalidCiphertextException. For more +// information, see Encryption Context +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) +// in the Key Management Service Developer Guide. The KMS key that you use for this +// operation must be in a compatible key state. For details, see Key states of KMS +// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in +// the Key Management Service Developer Guide. Cross-account use: Yes. To perform +// this operation with a KMS key in a different Amazon Web Services account, +// specify the key ARN or alias ARN in the value of the KeyId parameter. Required +// permissions: kms:GenerateDataKeyPairWithoutPlaintext +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * Decrypt +// +// * Encrypt +// +// * GenerateDataKey +// +// * +// GenerateDataKeyPair +// +// * GenerateDataKeyWithoutPlaintext +func (c *Client) GenerateDataKeyPairWithoutPlaintext(ctx context.Context, params *GenerateDataKeyPairWithoutPlaintextInput, optFns ...func(*Options)) (*GenerateDataKeyPairWithoutPlaintextOutput, error) { + if params == nil { + params = &GenerateDataKeyPairWithoutPlaintextInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GenerateDataKeyPairWithoutPlaintext", params, optFns, c.addOperationGenerateDataKeyPairWithoutPlaintextMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GenerateDataKeyPairWithoutPlaintextOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GenerateDataKeyPairWithoutPlaintextInput struct { + + // Specifies the symmetric encryption KMS key that encrypts the private key in the + // data key pair. You cannot specify an asymmetric KMS key or a KMS key in a custom + // key store. 
To get the type and origin of your KMS key, use the DescribeKey + // operation. To specify a KMS key, use its key ID, key ARN, alias name, or alias + // ARN. When using an alias name, prefix it with "alias/". To specify a KMS key in + // a different Amazon Web Services account, you must use the key ARN or alias ARN. + // For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Alias name: alias/ExampleAlias + // + // * Alias ARN: + // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key + // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias + // ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // Determines the type of data key pair that is generated. The KMS rule that + // restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or + // to sign and verify (but not both), and the rule that permits you to use ECC KMS + // keys only to sign and verify, are not effective on data key pairs, which are + // used outside of KMS. The SM2 key spec is only available in China Regions. + // + // This member is required. + KeyPairSpec types.DataKeyPairSpec + + // Specifies the encryption context that will be used when encrypting the private + // key in the data key pair. An encryption context is a collection of non-secret + // key-value pairs that represent additional authenticated data. When you use an + // encryption context to encrypt data, you must specify the same (an exact + // case-sensitive match) encryption context to decrypt the data. An encryption + // context is supported only on operations with symmetric encryption KMS keys. On + // operations with symmetric encryption KMS keys, an encryption context is + // optional, but it is strongly recommended. For more information, see Encryption + // context + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // in the Key Management Service Developer Guide. + EncryptionContext map[string]string + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. + // For more information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantTokens []string + + noSmithyDocumentSerde +} + +type GenerateDataKeyPairWithoutPlaintextOutput struct { + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the KMS key that encrypted the private key. + KeyId *string + + // The type of data key pair that was generated. + KeyPairSpec types.DataKeyPairSpec + + // The encrypted copy of the private key. When you use the HTTP API or the Amazon + // Web Services CLI, the value is Base64-encoded. Otherwise, it is not + // Base64-encoded. + PrivateKeyCiphertextBlob []byte + + // The public key (in plaintext). When you use the HTTP API or the Amazon Web + // Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. + PublicKey []byte + + // Metadata pertaining to the operation's result. 
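+
+	// Illustrative usage sketch (editorial addition, not part of the generated
+	// SDK file): minting a key pair up front and deferring private key use,
+	// assuming ctx, a configured *kms.Client named client, and the placeholder
+	// alias from these docs.
+	//
+	//	kp, err := client.GenerateDataKeyPairWithoutPlaintext(ctx,
+	//		&kms.GenerateDataKeyPairWithoutPlaintextInput{
+	//			KeyId:       aws.String("alias/ExampleAlias"),
+	//			KeyPairSpec: types.DataKeyPairSpecRsa2048,
+	//		})
+	//	if err != nil {
+	//		return err
+	//	}
+	//	// Encrypt or verify with kp.PublicKey now; later, pass
+	//	// kp.PrivateKeyCiphertextBlob to Decrypt only when the private
+	//	// key is actually needed.
+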
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGenerateDataKeyPairWithoutPlaintextMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGenerateDataKeyPairWithoutPlaintext{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGenerateDataKeyPairWithoutPlaintext{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGenerateDataKeyPairWithoutPlaintextValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateDataKeyPairWithoutPlaintext(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGenerateDataKeyPairWithoutPlaintext(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "GenerateDataKeyPairWithoutPlaintext", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go new file mode 100644 index 00000000000..06a49b6eac6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go @@ -0,0 +1,231 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a unique symmetric data key for use outside of KMS. This operation +// returns a data key that is encrypted under a symmetric encryption KMS key that +// you specify. The bytes in the key are random; they are not related to the caller +// or to the KMS key. 
GenerateDataKeyWithoutPlaintext is identical to the +// GenerateDataKey operation except that it does not return a plaintext copy of the +// data key. This operation is useful for systems that need to encrypt data at some +// point, but not immediately. When you need to encrypt the data, you call the +// Decrypt operation on the encrypted copy of the key. It's also useful in +// distributed systems with different levels of trust. For example, you might store +// encrypted data in containers. One component of your system creates new +// containers and stores an encrypted data key with each container. Then, a +// different component puts the data into the containers. That component first +// decrypts the data key, uses the plaintext data key to encrypt data, puts the +// encrypted data into the container, and then destroys the plaintext data key. In +// this system, the component that creates the containers never sees the plaintext +// data key. To request an asymmetric data key pair, use the GenerateDataKeyPair or +// GenerateDataKeyPairWithoutPlaintext operations. To generate a data key, you must +// specify the symmetric encryption KMS key that is used to encrypt the data key. +// You cannot use an asymmetric KMS key or a key in a custom key store to generate +// a data key. To get the type of your KMS key, use the DescribeKey operation. You +// must also specify the length of the data key. Use either the KeySpec or +// NumberOfBytes parameters (but not both). For 128-bit and 256-bit data keys, use +// the KeySpec parameter. To generate an SM4 data key (China Regions only), specify +// a KeySpec value of AES_128 or NumberOfBytes value of 128. The symmetric +// encryption key used in China Regions to encrypt your data key is an SM4 +// encryption key. If the operation succeeds, you will find the encrypted copy of +// the data key in the CiphertextBlob field. You can use an optional encryption +// context to add additional security to the encryption operation. If you specify +// an EncryptionContext, you must specify the same encryption context (a +// case-sensitive exact match) when decrypting the encrypted data key. Otherwise, +// the request to decrypt fails with an InvalidCiphertextException. For more +// information, see Encryption Context +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) +// in the Key Management Service Developer Guide. The KMS key that you use for this +// operation must be in a compatible key state. For details, see Key states of KMS +// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in +// the Key Management Service Developer Guide. Cross-account use: Yes. To perform +// this operation with a KMS key in a different Amazon Web Services account, +// specify the key ARN or alias ARN in the value of the KeyId parameter. 
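+// An illustrative sketch of the container pattern above (editorial addition,
+// not part of the generated SDK file; assumes ctx, a configured *kms.Client
+// named client, and the placeholder alias from these docs):
+//
+//	out, err := client.GenerateDataKeyWithoutPlaintext(ctx,
+//		&kms.GenerateDataKeyWithoutPlaintextInput{
+//			KeyId:   aws.String("alias/ExampleAlias"),
+//			KeySpec: types.DataKeySpecAes256,
+//		})
+//	if err != nil {
+//		return err
+//	}
+//	// Store out.CiphertextBlob with the new container; no plaintext data
+//	// key exists until a consumer calls Decrypt on the blob.
+//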
Required +// permissions: kms:GenerateDataKeyWithoutPlaintext +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * Decrypt +// +// * Encrypt +// +// * GenerateDataKey +// +// * +// GenerateDataKeyPair +// +// * GenerateDataKeyPairWithoutPlaintext +func (c *Client) GenerateDataKeyWithoutPlaintext(ctx context.Context, params *GenerateDataKeyWithoutPlaintextInput, optFns ...func(*Options)) (*GenerateDataKeyWithoutPlaintextOutput, error) { + if params == nil { + params = &GenerateDataKeyWithoutPlaintextInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GenerateDataKeyWithoutPlaintext", params, optFns, c.addOperationGenerateDataKeyWithoutPlaintextMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GenerateDataKeyWithoutPlaintextOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GenerateDataKeyWithoutPlaintextInput struct { + + // Specifies the symmetric encryption KMS key that encrypts the data key. You + // cannot specify an asymmetric KMS key or a KMS key in a custom key store. To get + // the type and origin of your KMS key, use the DescribeKey operation. To specify a + // KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias + // name, prefix it with "alias/". To specify a KMS key in a different Amazon Web + // Services account, you must use the key ARN or alias ARN. For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Alias name: alias/ExampleAlias + // + // * Alias ARN: + // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key + // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias + // ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // Specifies the encryption context that will be used when encrypting the data key. + // An encryption context is a collection of non-secret key-value pairs that + // represent additional authenticated data. When you use an encryption context to + // encrypt data, you must specify the same (an exact case-sensitive match) + // encryption context to decrypt the data. An encryption context is supported only + // on operations with symmetric encryption KMS keys. On operations with symmetric + // encryption KMS keys, an encryption context is optional, but it is strongly + // recommended. For more information, see Encryption context + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // in the Key Management Service Developer Guide. + EncryptionContext map[string]string + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. + // For more information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantTokens []string + + // The length of the data key. Use AES_128 to generate a 128-bit symmetric key, or + // AES_256 to generate a 256-bit symmetric key. + KeySpec types.DataKeySpec + + // The length of the data key in bytes. 
For example, use the value 64 to generate a + // 512-bit data key (64 bytes is 512 bits). For common key lengths (128-bit and + // 256-bit symmetric keys), we recommend that you use the KeySpec field instead of + // this one. + NumberOfBytes *int32 + + noSmithyDocumentSerde +} + +type GenerateDataKeyWithoutPlaintextOutput struct { + + // The encrypted data key. When you use the HTTP API or the Amazon Web Services + // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. + CiphertextBlob []byte + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the KMS key that encrypted the data key. + KeyId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGenerateDataKeyWithoutPlaintextMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGenerateDataKeyWithoutPlaintext{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGenerateDataKeyWithoutPlaintext{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGenerateDataKeyWithoutPlaintextValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateDataKeyWithoutPlaintext(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGenerateDataKeyWithoutPlaintext(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "GenerateDataKeyWithoutPlaintext", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go new file mode 100644 index 00000000000..43b03293207 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go @@ -0,0 +1,181 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Generates a hash-based message authentication code (HMAC) for a message using an +// HMAC KMS key and a MAC algorithm that the key supports. HMAC KMS keys and the +// HMAC algorithms that KMS uses conform to industry standards defined in RFC 2104 +// (https://datatracker.ietf.org/doc/html/rfc2104). You can use the value that +// GenerateMac returns in the VerifyMac operation to demonstrate that the original +// message has not changed. Also, because a secret key is used to create the hash, +// you can verify that the party that generated the hash has the required secret +// key. You can also use the raw result to implement HMAC-based algorithms such as +// key derivation functions. This operation is part of KMS support for HMAC KMS +// keys. For details, see HMAC keys in KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) in the Key +// Management Service Developer Guide. Best practices recommend that you limit the +// time during which any signing mechanism, including an HMAC, is effective. This +// deters an attack where the actor uses a signed message to establish validity +// repeatedly or long after the message is superseded. HMAC tags do not include a +// timestamp, but you can include a timestamp in the token or message to help you +// detect when it's time to refresh the HMAC. The KMS key that you use for this +// operation must be in a compatible key state. For details, see Key states of KMS +// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in +// the Key Management Service Developer Guide. Cross-account use: Yes. To perform +// this operation with a KMS key in a different Amazon Web Services account, +// specify the key ARN or alias ARN in the value of the KeyId parameter. Required +// permissions: kms:GenerateMac +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: VerifyMac +func (c *Client) GenerateMac(ctx context.Context, params *GenerateMacInput, optFns ...func(*Options)) (*GenerateMacOutput, error) { + if params == nil { + params = &GenerateMacInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GenerateMac", params, optFns, c.addOperationGenerateMacMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GenerateMacOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GenerateMacInput struct { + + // The HMAC KMS key to use in the operation. The MAC algorithm computes the HMAC + // for the message and the key as described in RFC 2104 + // (https://datatracker.ietf.org/doc/html/rfc2104). To identify an HMAC KMS key, + // use the DescribeKey operation and see the KeySpec field in the response. + // + // This member is required. + KeyId *string + + // The MAC algorithm used in the operation. The algorithm must be compatible with + // the HMAC KMS key that you specify. To find the MAC algorithms that your HMAC KMS + // key supports, use the DescribeKey operation and see the MacAlgorithms field in + // the DescribeKey response. + // + // This member is required. + MacAlgorithm types.MacAlgorithmSpec + + // The message to be hashed. Specify a message of up to 4,096 bytes.
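+	//
+	// Illustrative usage sketch (editorial addition, not part of the generated
+	// SDK file; assumes ctx, a configured *kms.Client named client, and a
+	// hypothetical HMAC key alias):
+	//
+	//	tag, err := client.GenerateMac(ctx, &kms.GenerateMacInput{
+	//		KeyId:        aws.String("alias/example-hmac-key"),
+	//		MacAlgorithm: types.MacAlgorithmSpecHmacSha256,
+	//		Message:      []byte("message to authenticate"),
+	//	})
+	//	if err != nil {
+	//		return err
+	//	}
+	//	// tag.Mac is the raw RFC 2104 HMAC; check it later with VerifyMac.
+	//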
GenerateMac + // and VerifyMac do not provide special handling for message digests. If you + // generate an HMAC for a hash digest of a message, you must verify the HMAC of the + // same hash digest. + // + // This member is required. + Message []byte + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. + // For more information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantTokens []string + + noSmithyDocumentSerde +} + +type GenerateMacOutput struct { + + // The HMAC KMS key used in the operation. + KeyId *string + + // The hash-based message authentication code (HMAC) that was generated for the + // specified message, HMAC KMS key, and MAC algorithm. This is the standard, raw + // HMAC defined in RFC 2104 (https://datatracker.ietf.org/doc/html/rfc2104). + Mac []byte + + // The MAC algorithm that was used to generate the HMAC. + MacAlgorithm types.MacAlgorithmSpec + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGenerateMacMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGenerateMac{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGenerateMac{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGenerateMacValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateMac(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGenerateMac(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "GenerateMac", + } +} diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go
new file mode 100644
index 00000000000..7ed73007f9e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go
@@ -0,0 +1,140 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package kms
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a random byte string that is cryptographically secure. You must use the
+// NumberOfBytes parameter to specify the length of the random byte string. There
+// is no default value for string length. By default, the random byte string is
+// generated in KMS. To generate the byte string in the CloudHSM cluster associated
+// with a CloudHSM key store, use the CustomKeyStoreId parameter. Applications in
+// Amazon Web Services Nitro Enclaves can call this operation by using the Amazon
+// Web Services Nitro Enclaves Development Kit
+// (https://github.com/aws/aws-nitro-enclaves-sdk-c). For information about the
+// supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS
+// (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html)
+// in the Key Management Service Developer Guide. For more information about
+// entropy and random number generation, see Key Management Service Cryptographic
+// Details (https://docs.aws.amazon.com/kms/latest/cryptographic-details/).
+// Cross-account use: Not applicable. GenerateRandom does not use any
+// account-specific resources, such as KMS keys. Required permissions:
+// kms:GenerateRandom
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (IAM policy)
+func (c *Client) GenerateRandom(ctx context.Context, params *GenerateRandomInput, optFns ...func(*Options)) (*GenerateRandomOutput, error) {
+	if params == nil {
+		params = &GenerateRandomInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GenerateRandom", params, optFns, c.addOperationGenerateRandomMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GenerateRandomOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type GenerateRandomInput struct {
+
+	// Generates the random byte string in the CloudHSM cluster that is associated with
+	// the specified CloudHSM key store. To find the ID of a custom key store, use the
+	// DescribeCustomKeyStores operation. External key store IDs are not valid for this
+	// parameter. If you specify the ID of an external key store, GenerateRandom throws
+	// an UnsupportedOperationException.
+	CustomKeyStoreId *string
+
+	// The length of the random byte string. This parameter is required.
+	NumberOfBytes *int32
+
+	noSmithyDocumentSerde
+}
+
+type GenerateRandomOutput struct {
+
+	// The random byte string. When you use the HTTP API or the Amazon Web Services
+	// CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded.
+	Plaintext []byte
+
+	// Metadata pertaining to the operation's result.
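// [Editor's note: illustrative sketch only, not part of the vendored file.]
// Requesting 32 cryptographically secure random bytes from KMS; client setup
// is assumed as in the earlier sketch, and no KMS key is involved:
//
//	out, err := client.GenerateRandom(ctx, &kms.GenerateRandomInput{
//		NumberOfBytes: aws.Int32(32),
//	})
//	if err != nil { /* handle error */ }
//	seed := out.Plaintext // 32 random bytes, already decoded by the SDK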
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGenerateRandomMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGenerateRandom{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGenerateRandom{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateRandom(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGenerateRandom(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "GenerateRandom", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go new file mode 100644 index 00000000000..ca9e135bd20 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go @@ -0,0 +1,140 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets a key policy attached to the specified KMS key. Cross-account use: No. You +// cannot perform this operation on a KMS key in a different Amazon Web Services +// account. 
Required permissions: kms:GetKeyPolicy +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: PutKeyPolicy +func (c *Client) GetKeyPolicy(ctx context.Context, params *GetKeyPolicyInput, optFns ...func(*Options)) (*GetKeyPolicyOutput, error) { + if params == nil { + params = &GetKeyPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetKeyPolicy", params, optFns, c.addOperationGetKeyPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetKeyPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetKeyPolicyInput struct { + + // Gets the key policy for the specified KMS key. Specify the key ID or key ARN of + // the KMS key. For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key + // ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // Specifies the name of the key policy. The only valid name is default. To get the + // names of key policies, use ListKeyPolicies. + // + // This member is required. + PolicyName *string + + noSmithyDocumentSerde +} + +type GetKeyPolicyOutput struct { + + // A key policy document in JSON format. + Policy *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetKeyPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetKeyPolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetKeyPolicy{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetKeyPolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetKeyPolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetKeyPolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return 
&awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "kms",
+		OperationName: "GetKeyPolicy",
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go
new file mode 100644
index 00000000000..0d8c4320cc2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go
@@ -0,0 +1,186 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package kms
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Gets a Boolean value that indicates whether automatic rotation of the key
+// material
+// (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html) is
+// enabled for the specified KMS key. When you enable automatic rotation for
+// customer managed KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk),
+// KMS rotates the key material of the KMS key one year (approximately 365 days)
+// from the enable date and every year thereafter. You can monitor rotation of the
+// key material for your KMS keys in CloudTrail and Amazon CloudWatch. Automatic
+// key rotation is supported only on symmetric encryption KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#symmetric-cmks).
+// You cannot enable automatic rotation of asymmetric KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
+// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html),
+// KMS keys with imported key material
+// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), or
+// KMS keys in a custom key store
+// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html).
+// To enable or disable automatic rotation of a set of related multi-Region keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
+// set the property on the primary key. You can enable (EnableKeyRotation) and
+// disable (DisableKeyRotation) automatic rotation of the key material in customer
+// managed KMS keys. Key material rotation of Amazon Web Services managed KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk)
+// is not configurable. KMS always rotates the key material in Amazon Web Services
+// managed KMS keys every year. The key rotation status for Amazon Web Services
+// managed KMS keys is always true. In May 2022, KMS changed the rotation schedule
+// for Amazon Web Services managed keys from every three years to every year. For
+// details, see EnableKeyRotation. The KMS key that you use for this operation must
+// be in a compatible key state. For details, see Key states of KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the
+// Key Management Service Developer Guide.
+//
+// * Disabled: The key rotation status
+// does not change when you disable a KMS key. However, while the KMS key is
+// disabled, KMS does not rotate the key material. When you re-enable the KMS key,
+// rotation resumes.
If the key material in the re-enabled KMS key hasn't been +// rotated in one year, KMS rotates it immediately, and every year thereafter. If +// it's been less than a year since the key material in the re-enabled KMS key was +// rotated, the KMS key resumes its prior rotation schedule. +// +// * Pending deletion: +// While a KMS key is pending deletion, its key rotation status is false and KMS +// does not rotate the key material. If you cancel the deletion, the original key +// rotation status returns to true. +// +// Cross-account use: Yes. To perform this +// operation on a KMS key in a different Amazon Web Services account, specify the +// key ARN in the value of the KeyId parameter. Required permissions: +// kms:GetKeyRotationStatus +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * DisableKeyRotation +// +// * EnableKeyRotation +func (c *Client) GetKeyRotationStatus(ctx context.Context, params *GetKeyRotationStatusInput, optFns ...func(*Options)) (*GetKeyRotationStatusOutput, error) { + if params == nil { + params = &GetKeyRotationStatusInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetKeyRotationStatus", params, optFns, c.addOperationGetKeyRotationStatusMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetKeyRotationStatusOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetKeyRotationStatusInput struct { + + // Gets the rotation status for the specified KMS key. Specify the key ID or key + // ARN of the KMS key. To specify a KMS key in a different Amazon Web Services + // account, you must use the key ARN. For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type GetKeyRotationStatusOutput struct { + + // A Boolean value that specifies whether key rotation is enabled. + KeyRotationEnabled bool + + // Metadata pertaining to the operation's result. 
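// [Editor's note: illustrative sketch only, not part of the vendored file.]
// Checking whether automatic rotation is enabled for a key (the key ID below
// is the documentation's own example value):
//
//	out, err := client.GetKeyRotationStatus(ctx, &kms.GetKeyRotationStatusInput{
//		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
//	})
//	if err != nil { /* handle error */ }
//	if out.KeyRotationEnabled {
//		// key material is rotated yearly
//	}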
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetKeyRotationStatusMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetKeyRotationStatus{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetKeyRotationStatus{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetKeyRotationStatusValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetKeyRotationStatus(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetKeyRotationStatus(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "GetKeyRotationStatus", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go new file mode 100644 index 00000000000..36f6eb2d088 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go @@ -0,0 +1,190 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Returns the items you need to import key material into a symmetric encryption +// KMS key. For more information about importing key material into KMS, see +// Importing key material +// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) in +// the Key Management Service Developer Guide. This operation returns a public key +// and an import token. Use the public key to encrypt the symmetric key material. +// Store the import token to send with a subsequent ImportKeyMaterial request. 
You +// must specify the key ID of the symmetric encryption KMS key into which you will +// import key material. The KMS key Origin must be EXTERNAL. You must also specify +// the wrapping algorithm and type of wrapping key (public key) that you will use +// to encrypt the key material. You cannot perform this operation on an asymmetric +// KMS key, an HMAC KMS key, or on any KMS key in a different Amazon Web Services +// account. To import key material, you must use the public key and import token +// from the same response. These items are valid for 24 hours. The expiration date +// and time appear in the GetParametersForImport response. You cannot use an +// expired token in an ImportKeyMaterial request. If your key and token expire, +// send another GetParametersForImport request. The KMS key that you use for this +// operation must be in a compatible key state. For details, see Key states of KMS +// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in +// the Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account. +// Required permissions: kms:GetParametersForImport +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * ImportKeyMaterial +// +// * +// DeleteImportedKeyMaterial +func (c *Client) GetParametersForImport(ctx context.Context, params *GetParametersForImportInput, optFns ...func(*Options)) (*GetParametersForImportOutput, error) { + if params == nil { + params = &GetParametersForImportInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetParametersForImport", params, optFns, c.addOperationGetParametersForImportMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetParametersForImportOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetParametersForImportInput struct { + + // The identifier of the symmetric encryption KMS key into which you will import + // key material. The Origin of the KMS key must be EXTERNAL. Specify the key ID or + // key ARN of the KMS key. For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // The algorithm you will use to encrypt the key material before importing it with + // ImportKeyMaterial. For more information, see Encrypt the Key Material + // (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys-encrypt-key-material.html) + // in the Key Management Service Developer Guide. + // + // This member is required. + WrappingAlgorithm types.AlgorithmSpec + + // The type of wrapping key (public key) to return in the response. Only 2048-bit + // RSA public keys are supported. + // + // This member is required. + WrappingKeySpec types.WrappingKeySpec + + noSmithyDocumentSerde +} + +type GetParametersForImportOutput struct { + + // The import token to send in a subsequent ImportKeyMaterial request. + ImportToken []byte + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the KMS key to use in a subsequent ImportKeyMaterial request. This is the + // same KMS key specified in the GetParametersForImport request. 
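// [Editor's note: illustrative sketch only, not part of the vendored file.]
// Fetching a wrapping key and import token for a KMS key whose Origin is
// EXTERNAL; the enum constant names are assumed from the types package and
// the key ID is hypothetical:
//
//	out, err := client.GetParametersForImport(ctx, &kms.GetParametersForImportInput{
//		KeyId:             aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
//		WrappingAlgorithm: types.AlgorithmSpecRsaesOaepSha256,
//		WrappingKeySpec:   types.WrappingKeySpecRsa2048,
//	})
//	if err != nil { /* handle error */ }
//	// out.PublicKey wraps the key material; out.ImportToken accompanies the
//	// subsequent ImportKeyMaterial request. Both expire together (ParametersValidTo).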
+ KeyId *string + + // The time at which the import token and public key are no longer valid. After + // this time, you cannot use them to make an ImportKeyMaterial request and you must + // send another GetParametersForImport request to get new ones. + ParametersValidTo *time.Time + + // The public key to use to encrypt the key material before importing it with + // ImportKeyMaterial. + PublicKey []byte + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetParametersForImportMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetParametersForImport{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetParametersForImport{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetParametersForImportValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetParametersForImport(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetParametersForImport(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "GetParametersForImport", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go new file mode 100644 index 00000000000..cd093fada63 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go @@ -0,0 +1,235 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the public key of an asymmetric KMS key. 
Unlike the private key of an
+// asymmetric KMS key, which never leaves KMS unencrypted, callers with
+// kms:GetPublicKey permission can download the public key of an asymmetric KMS
+// key. You can share the public key to allow others to encrypt messages and verify
+// signatures outside of KMS. For information about asymmetric KMS keys, see
+// Asymmetric KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
+// in the Key Management Service Developer Guide. You do not need to download the
+// public key. Instead, you can use the public key within KMS by calling the
+// Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric
+// KMS key. When you use the public key within KMS, you benefit from the
+// authentication, authorization, and logging that are part of every KMS operation.
+// You also reduce the risk of encrypting data that cannot be decrypted. These
+// features are not effective outside of KMS. To help you use the public key safely
+// outside of KMS, GetPublicKey returns important information about the public key
+// in the response, including:
+//
+// * KeySpec
+// (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-KeySpec):
+// The type of key material in the public key, such as RSA_4096 or
+// ECC_NIST_P521.
+//
+// * KeyUsage
+// (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-KeyUsage):
+// Whether the key is used for encryption or signing.
+//
+// * EncryptionAlgorithms
+// (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-EncryptionAlgorithms)
+// or SigningAlgorithms
+// (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-SigningAlgorithms):
+// A list of the encryption algorithms or the signing algorithms for the
+// key.
+//
+// Although KMS cannot enforce these restrictions on external operations, it
+// is crucial that you use this information to prevent the public key from being
+// used improperly. For example, you can prevent a public signing key from being
+// used to encrypt data, or prevent a public key from being used with an encryption
+// algorithm that is not supported by KMS. You can also avoid errors, such as using
+// the wrong signing algorithm in a verification operation. To verify a signature
+// outside of KMS with an SM2 public key (China Regions only), you must specify the
+// distinguishing ID. By default, KMS uses 1234567812345678 as the distinguishing
+// ID. For more information, see Offline verification with SM2 key pairs
+// (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification).
+// The KMS key that you use for this operation must be in a compatible key state.
+// For details, see Key states of KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the
+// Key Management Service Developer Guide. Cross-account use: Yes. To perform this
+// operation with a KMS key in a different Amazon Web Services account, specify the
+// key ARN or alias ARN in the value of the KeyId parameter. Required permissions:
+// kms:GetPublicKey
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (key policy) Related operations: CreateKey
+func (c *Client) GetPublicKey(ctx context.Context, params *GetPublicKeyInput, optFns ...func(*Options)) (*GetPublicKeyOutput, error) {
+	if params == nil {
+		params = &GetPublicKeyInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetPublicKey", params, optFns, c.addOperationGetPublicKeyMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetPublicKeyOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type GetPublicKeyInput struct {
+
+	// Identifies the asymmetric KMS key that includes the public key. To specify a KMS
+	// key, use its key ID, key ARN, alias name, or alias ARN. When using an alias
+	// name, prefix it with "alias/". To specify a KMS key in a different Amazon Web
+	// Services account, you must use the key ARN or alias ARN. For example:
+	//
+	// * Key ID:
+	// 1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// * Key ARN:
+	// arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// *
+	// Alias name: alias/ExampleAlias
+	//
+	// * Alias ARN:
+	// arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias
+	//
+	// To get the key ID and key
+	// ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias
+	// ARN, use ListAliases.
+	//
+	// This member is required.
+	KeyId *string
+
+	// A list of grant tokens. Use a grant token when your permission to call this
+	// operation comes from a new grant that has not yet achieved eventual consistency.
+	// For more information, see Grant token
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token)
+	// and Using a grant token
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token)
+	// in the Key Management Service Developer Guide.
+	GrantTokens []string
+
+	noSmithyDocumentSerde
+}
+
+type GetPublicKeyOutput struct {
+
+	// Instead, use the KeySpec field in the GetPublicKey response. The KeySpec and
+	// CustomerMasterKeySpec fields have the same value. We recommend that you use the
+	// KeySpec field in your code. However, to avoid breaking changes, KMS supports
+	// both fields.
+	//
+	// Deprecated: This field has been deprecated. Instead, use the KeySpec field.
+	CustomerMasterKeySpec types.CustomerMasterKeySpec
+
+	// The encryption algorithms that KMS supports for this key. This information is
+	// critical. If a public key encrypts data outside of KMS by using an unsupported
+	// encryption algorithm, the ciphertext cannot be decrypted. This field appears in
+	// the response only when the KeyUsage of the public key is ENCRYPT_DECRYPT.
+	EncryptionAlgorithms []types.EncryptionAlgorithmSpec
+
+	// The Amazon Resource Name (key ARN
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN))
+	// of the asymmetric KMS key from which the public key was downloaded.
+	KeyId *string
+
+	// The type of the public key that was downloaded.
+	KeySpec types.KeySpec
+
+	// The permitted use of the public key. Valid values are ENCRYPT_DECRYPT or
+	// SIGN_VERIFY. This information is critical. If a public key with SIGN_VERIFY key
+	// usage encrypts data outside of KMS, the ciphertext cannot be decrypted.
+	KeyUsage types.KeyUsageType
+
+	// The exported public key.
The value is a DER-encoded X.509 public key, also known + // as SubjectPublicKeyInfo (SPKI), as defined in RFC 5280 + // (https://tools.ietf.org/html/rfc5280). When you use the HTTP API or the Amazon + // Web Services CLI, the value is Base64-encoded. Otherwise, it is not + // Base64-encoded. + PublicKey []byte + + // The signing algorithms that KMS supports for this key. This field appears in the + // response only when the KeyUsage of the public key is SIGN_VERIFY. + SigningAlgorithms []types.SigningAlgorithmSpec + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetPublicKeyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetPublicKey{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetPublicKey{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetPublicKeyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetPublicKey(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetPublicKey(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "GetPublicKey", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go new file mode 100644 index 00000000000..17f45760102 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go @@ -0,0 +1,223 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
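// [Editor's note: illustrative sketch only, not part of the vendored file.]
// Tying together the preceding api_op_GetPublicKey.go: because the PublicKey
// field is DER-encoded SubjectPublicKeyInfo, Go's standard crypto/x509
// package can parse it directly (key alias hypothetical):
//
//	out, err := client.GetPublicKey(ctx, &kms.GetPublicKeyInput{
//		KeyId: aws.String("alias/example-signing-key"),
//	})
//	if err != nil { /* handle error */ }
//	pub, err := x509.ParsePKIXPublicKey(out.PublicKey)
//	if err != nil { /* handle error */ }
//	// pub is e.g. *rsa.PublicKey or *ecdsa.PublicKey, depending on out.KeySpec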
+
+package kms
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/aws-sdk-go-v2/service/kms/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"time"
+)
+
+// Imports key material into an existing symmetric encryption KMS key that was
+// created without key material. After you successfully import key material into a
+// KMS key, you can reimport the same key material
+// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#reimport-key-material)
+// into that KMS key, but you cannot import different key material. You cannot
+// perform this operation on an asymmetric KMS key, an HMAC KMS key, or on any KMS
+// key in a different Amazon Web Services account. For more information about
+// creating KMS keys with no key material and then importing key material, see
+// Importing Key Material
+// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) in
+// the Key Management Service Developer Guide. Before using this operation, call
+// GetParametersForImport. Its response includes a public key and an import token.
+// Use the public key to encrypt the key material. Then, submit the import token
+// from the same GetParametersForImport response. When calling this operation, you
+// must specify the following values:
+//
+// * The key ID or key ARN of a KMS key with no
+// key material. Its Origin must be EXTERNAL. To create a KMS key with no key
+// material, call CreateKey and set the value of its Origin parameter to EXTERNAL.
+// To get the Origin of a KMS key, call DescribeKey.
+//
+// * The encrypted key
+// material. To get the public key to encrypt the key material, call
+// GetParametersForImport.
+//
+// * The import token that GetParametersForImport
+// returned. You must use a public key and token from the same
+// GetParametersForImport response.
+//
+// * Whether the key material expires
+// (ExpirationModel) and, if so, when (ValidTo). If you set an expiration date, on
+// the specified date, KMS deletes the key material from the KMS key, making the
+// KMS key unusable. To use the KMS key in cryptographic operations again, you must
+// reimport the same key material. The only way to change the expiration model or
+// expiration date is by reimporting the same key material and specifying a new
+// expiration date.
+//
+// When this operation is successful, the key state of the KMS
+// key changes from PendingImport to Enabled, and you can use the KMS key. If this
+// operation fails, use the exception to help determine the problem. If the error
+// is related to the key material, the import token, or wrapping key, use
+// GetParametersForImport to get a new public key and import token for the KMS key
+// and repeat the import procedure. For help, see How To Import Key Material
+// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#importing-keys-overview)
+// in the Key Management Service Developer Guide. The KMS key that you use for this
+// operation must be in a compatible key state. For details, see Key states of KMS
+// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in
+// the Key Management Service Developer Guide. Cross-account use: No. You cannot
+// perform this operation on a KMS key in a different Amazon Web Services account.
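// [Editor's note: illustrative sketch only, not part of the vendored file.]
// An end-to-end import might wrap locally generated key material with the RSA
// public key from GetParametersForImport. RSA-OAEP here assumes the
// RSAES_OAEP_SHA_256 wrapping algorithm was requested, and keyID/keyMaterial
// are hypothetical variables:
//
//	params, err := client.GetParametersForImport(ctx, /* see the earlier sketch */)
//	pub, err := x509.ParsePKIXPublicKey(params.PublicKey)
//	wrapped, err := rsa.EncryptOAEP(sha256.New(), rand.Reader,
//		pub.(*rsa.PublicKey), keyMaterial, nil) // keyMaterial: locally generated bytes
//	_, err = client.ImportKeyMaterial(ctx, &kms.ImportKeyMaterialInput{
//		KeyId:                aws.String(keyID), // same EXTERNAL-origin key
//		EncryptedKeyMaterial: wrapped,
//		ImportToken:          params.ImportToken,
//		ExpirationModel:      types.ExpirationModelTypeKeyMaterialDoesNotExpire, // constant name assumed
//	})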
+// Required permissions: kms:ImportKeyMaterial
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (key policy) Related operations:
+//
+// * DeleteImportedKeyMaterial
+//
+// *
+// GetParametersForImport
+func (c *Client) ImportKeyMaterial(ctx context.Context, params *ImportKeyMaterialInput, optFns ...func(*Options)) (*ImportKeyMaterialOutput, error) {
+	if params == nil {
+		params = &ImportKeyMaterialInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ImportKeyMaterial", params, optFns, c.addOperationImportKeyMaterialMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ImportKeyMaterialOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ImportKeyMaterialInput struct {
+
+	// The encrypted key material to import. The key material must be encrypted with
+	// the public wrapping key that GetParametersForImport returned, using the wrapping
+	// algorithm that you specified in the same GetParametersForImport request.
+	//
+	// This member is required.
+	EncryptedKeyMaterial []byte
+
+	// The import token that you received in the response to a previous
+	// GetParametersForImport request. It must be from the same response that contained
+	// the public key that you used to encrypt the key material.
+	//
+	// This member is required.
+	ImportToken []byte
+
+	// The identifier of the symmetric encryption KMS key that receives the imported
+	// key material. This must be the same KMS key specified in the KeyID parameter of
+	// the corresponding GetParametersForImport request. The Origin of the KMS key must
+	// be EXTERNAL. You cannot perform this operation on an asymmetric KMS key, an HMAC
+	// KMS key, a KMS key in a custom key store, or on a KMS key in a different Amazon
+	// Web Services account. Specify the key ID or key ARN of the KMS key. For
+	// example:
+	//
+	// * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// * Key ARN:
+	// arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// To
+	// get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.
+	//
+	// This member is required.
+	KeyId *string
+
+	// Specifies whether the key material expires. The default is KEY_MATERIAL_EXPIRES.
+	// When the value of ExpirationModel is KEY_MATERIAL_EXPIRES, you must specify a
+	// value for the ValidTo parameter. When the value is KEY_MATERIAL_DOES_NOT_EXPIRE,
+	// you must omit the ValidTo parameter. You cannot change the ExpirationModel or
+	// ValidTo values for the current import after the request completes. To change
+	// either value, you must delete (DeleteImportedKeyMaterial) and reimport the key
+	// material.
+	ExpirationModel types.ExpirationModelType
+
+	// The date and time when the imported key material expires. This parameter is
+	// required when the value of the ExpirationModel parameter is
+	// KEY_MATERIAL_EXPIRES. Otherwise it is not valid. The value of this parameter
+	// must be a future date and time. The maximum value is 365 days from the request
+	// date. When the key material expires, KMS deletes the key material from the KMS
+	// key. Without its key material, the KMS key is unusable. To use the KMS key in
+	// cryptographic operations, you must reimport the same key material. You cannot
+	// change the ExpirationModel or ValidTo values for the current import after the
+	// request completes. To change either value, you must delete
+	// (DeleteImportedKeyMaterial) and reimport the key material.
+ ValidTo *time.Time + + noSmithyDocumentSerde +} + +type ImportKeyMaterialOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationImportKeyMaterialMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpImportKeyMaterial{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpImportKeyMaterial{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpImportKeyMaterialValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opImportKeyMaterial(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opImportKeyMaterial(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "ImportKeyMaterial", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go new file mode 100644 index 00000000000..10f9345036c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go @@ -0,0 +1,270 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets a list of aliases in the caller's Amazon Web Services account and region. +// For more information about aliases, see CreateAlias. By default, the ListAliases +// operation returns all aliases in the account and region. To get only the aliases +// associated with a particular KMS key, use the KeyId parameter. 
The ListAliases
+// response can include aliases that you created and associated with your customer
+// managed keys, and aliases that Amazon Web Services created and associated with
+// Amazon Web Services managed keys in your account. You can recognize Amazon Web
+// Services aliases because their names have the format aws/, such as aws/dynamodb.
+// The response might also include aliases that have no TargetKeyId field. These
+// are predefined aliases that Amazon Web Services has created but has not yet
+// associated with a KMS key. Aliases that Amazon Web Services creates in your
+// account, including predefined aliases, do not count against your KMS aliases
+// quota
+// (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#aliases-limit).
+// Cross-account use: No. ListAliases does not return aliases in other Amazon Web
+// Services accounts. Required permissions: kms:ListAliases
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (IAM policy) For details, see Controlling access to aliases
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access)
+// in the Key Management Service Developer Guide. Related operations:
+//
+// *
+// CreateAlias
+//
+// * DeleteAlias
+//
+// * UpdateAlias
+func (c *Client) ListAliases(ctx context.Context, params *ListAliasesInput, optFns ...func(*Options)) (*ListAliasesOutput, error) {
+	if params == nil {
+		params = &ListAliasesInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ListAliases", params, optFns, c.addOperationListAliasesMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ListAliasesOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ListAliasesInput struct {
+
+	// Lists only aliases that are associated with the specified KMS key. Enter a KMS
+	// key in your Amazon Web Services account. This parameter is optional. If you omit
+	// it, ListAliases returns all aliases in the account and Region. Specify the key
+	// ID or key ARN of the KMS key. For example:
+	//
+	// * Key ID:
+	// 1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// * Key ARN:
+	// arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// To
+	// get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.
+	KeyId *string
+
+	// Use this parameter to specify the maximum number of items to return. When this
+	// value is present, KMS does not return more than the specified number of items,
+	// but it might return fewer. This value is optional. If you include a value, it
+	// must be between 1 and 100, inclusive. If you do not include a value, it defaults
+	// to 50.
+	Limit *int32
+
+	// Use this parameter in a subsequent request after you receive a response with
+	// truncated results. Set it to the value of NextMarker from the truncated response
+	// you just received.
+	Marker *string
+
+	noSmithyDocumentSerde
+}
+
+type ListAliasesOutput struct {
+
+	// A list of aliases.
+	Aliases []types.AliasListEntry
+
+	// When Truncated is true, this element is present and contains the value to use
+	// for the Marker parameter in a subsequent request.
+	NextMarker *string
+
+	// A flag that indicates whether there are more items in the list. When this value
+	// is true, the list in this response is truncated. To get more items, pass the
+	// value of the NextMarker element in this response to the Marker parameter in a
+	// subsequent request.
+ Truncated bool + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListAliasesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListAliases{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListAliases{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAliases(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListAliasesAPIClient is a client that implements the ListAliases operation. +type ListAliasesAPIClient interface { + ListAliases(context.Context, *ListAliasesInput, ...func(*Options)) (*ListAliasesOutput, error) +} + +var _ ListAliasesAPIClient = (*Client)(nil) + +// ListAliasesPaginatorOptions is the paginator options for ListAliases +type ListAliasesPaginatorOptions struct { + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. This value is optional. If you include a value, it + // must be between 1 and 100, inclusive. If you do not include a value, it defaults + // to 50. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
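// [Editor's note: illustrative sketch only, not part of the vendored file.]
// Typical use of the paginator defined below: page through every alias in the
// account instead of handling Marker/NextMarker/Truncated by hand:
//
//	p := kms.NewListAliasesPaginator(client, &kms.ListAliasesInput{})
//	for p.HasMorePages() {
//		page, err := p.NextPage(ctx)
//		if err != nil { /* handle error */ }
//		for _, alias := range page.Aliases {
//			_ = alias.AliasName // *string, e.g. "alias/ExampleAlias"
//		}
//	}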
+ StopOnDuplicateToken bool +} + +// ListAliasesPaginator is a paginator for ListAliases +type ListAliasesPaginator struct { + options ListAliasesPaginatorOptions + client ListAliasesAPIClient + params *ListAliasesInput + nextToken *string + firstPage bool +} + +// NewListAliasesPaginator returns a new ListAliasesPaginator +func NewListAliasesPaginator(client ListAliasesAPIClient, params *ListAliasesInput, optFns ...func(*ListAliasesPaginatorOptions)) *ListAliasesPaginator { + if params == nil { + params = &ListAliasesInput{} + } + + options := ListAliasesPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListAliasesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.Marker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListAliasesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListAliases page. +func (p *ListAliasesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAliasesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.Marker = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + result, err := p.client.ListAliases(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextMarker + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListAliases(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "ListAliases", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go new file mode 100644 index 00000000000..21b11e1243c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go @@ -0,0 +1,283 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets a list of all grants for the specified KMS key. You must specify the KMS +// key in all requests. You can filter the grant list by grant ID or grantee +// principal. For detailed information about grants, including grant terminology, +// see Grants in KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) in the Key +// Management Service Developer Guide . For examples of working with grants in +// several programming languages, see Programming grants +// (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html). +// The GranteePrincipal field in the ListGrants response usually contains the user +// or role designated as the grantee principal in the grant. 
However, when the
+// grantee principal in the grant is an Amazon Web Services service, the
+// GranteePrincipal field contains the service principal
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services),
+// which might represent several different grantee principals. Cross-account use:
+// Yes. To perform this operation on a KMS key in a different Amazon Web Services
+// account, specify the key ARN in the value of the KeyId parameter. Required
+// permissions: kms:ListGrants
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (key policy) Related operations:
+//
+// * CreateGrant
+//
+// * ListRetirableGrants
+//
+// *
+// RetireGrant
+//
+// * RevokeGrant
+func (c *Client) ListGrants(ctx context.Context, params *ListGrantsInput, optFns ...func(*Options)) (*ListGrantsOutput, error) {
+	if params == nil {
+		params = &ListGrantsInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ListGrants", params, optFns, c.addOperationListGrantsMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ListGrantsOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ListGrantsInput struct {
+
+	// Returns only grants for the specified KMS key. This parameter is required.
+	// Specify the key ID or key ARN of the KMS key. To specify a KMS key in a
+	// different Amazon Web Services account, you must use the key ARN. For example:
+	//
+	// *
+	// Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// * Key ARN:
+	// arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// To
+	// get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.
+	//
+	// This member is required.
+	KeyId *string
+
+	// Returns only the grant with the specified grant ID. The grant ID uniquely
+	// identifies the grant.
+	GrantId *string
+
+	// Returns only grants where the specified principal is the grantee principal for
+	// the grant.
+	GranteePrincipal *string
+
+	// Use this parameter to specify the maximum number of items to return. When this
+	// value is present, KMS does not return more than the specified number of items,
+	// but it might return fewer. This value is optional. If you include a value, it
+	// must be between 1 and 100, inclusive. If you do not include a value, it defaults
+	// to 50.
+	Limit *int32
+
+	// Use this parameter in a subsequent request after you receive a response with
+	// truncated results. Set it to the value of NextMarker from the truncated response
+	// you just received.
+	Marker *string
+
+	noSmithyDocumentSerde
+}
+
+type ListGrantsOutput struct {
+
+	// A list of grants.
+	Grants []types.GrantListEntry
+
+	// When Truncated is true, this element is present and contains the value to use
+	// for the Marker parameter in a subsequent request.
+	NextMarker *string
+
+	// A flag that indicates whether there are more items in the list. When this value
+	// is true, the list in this response is truncated. To get more items, pass the
+	// value of the NextMarker element in this response to the Marker parameter in a
+	// subsequent request.
+	Truncated bool
+
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListGrantsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListGrants{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListGrants{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListGrantsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListGrants(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListGrantsAPIClient is a client that implements the ListGrants operation. +type ListGrantsAPIClient interface { + ListGrants(context.Context, *ListGrantsInput, ...func(*Options)) (*ListGrantsOutput, error) +} + +var _ ListGrantsAPIClient = (*Client)(nil) + +// ListGrantsPaginatorOptions is the paginator options for ListGrants +type ListGrantsPaginatorOptions struct { + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. This value is optional. If you include a value, it + // must be between 1 and 100, inclusive. If you do not include a value, it defaults + // to 50. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListGrantsPaginator is a paginator for ListGrants +type ListGrantsPaginator struct { + options ListGrantsPaginatorOptions + client ListGrantsAPIClient + params *ListGrantsInput + nextToken *string + firstPage bool +} + +// NewListGrantsPaginator returns a new ListGrantsPaginator +func NewListGrantsPaginator(client ListGrantsAPIClient, params *ListGrantsInput, optFns ...func(*ListGrantsPaginatorOptions)) *ListGrantsPaginator { + if params == nil { + params = &ListGrantsInput{} + } + + options := ListGrantsPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListGrantsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.Marker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListGrantsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListGrants page. +func (p *ListGrantsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListGrantsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.Marker = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + result, err := p.client.ListGrants(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextMarker + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListGrants(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "ListGrants", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go new file mode 100644 index 00000000000..a6c31313e38 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go @@ -0,0 +1,257 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets the names of the key policies that are attached to a KMS key. This +// operation is designed to get policy names that you can use in a GetKeyPolicy +// operation. However, the only valid policy name is default. Cross-account use: +// No. You cannot perform this operation on a KMS key in a different Amazon Web +// Services account. 
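+// A minimal usage sketch (assuming an initialized *kms.Client named client, a
+// context.Context named ctx, and the aws helper package for pointer values; the
+// key ID below is a placeholder):
+//
+//	out, err := client.ListKeyPolicies(ctx, &kms.ListKeyPoliciesInput{
+//		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(out.PolicyNames) // expected: [default]
+//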
Required permissions: kms:ListKeyPolicies
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (key policy) Related operations:
+//
+// * GetKeyPolicy
+//
+// * PutKeyPolicy
+func (c *Client) ListKeyPolicies(ctx context.Context, params *ListKeyPoliciesInput, optFns ...func(*Options)) (*ListKeyPoliciesOutput, error) {
+	if params == nil {
+		params = &ListKeyPoliciesInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ListKeyPolicies", params, optFns, c.addOperationListKeyPoliciesMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ListKeyPoliciesOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ListKeyPoliciesInput struct {
+
+	// Gets the names of key policies for the specified KMS key. Specify the key ID or
+	// key ARN of the KMS key. For example:
+	//
+	// * Key ID:
+	// 1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// * Key ARN:
+	// arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// To
+	// get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.
+	//
+	// This member is required.
+	KeyId *string
+
+	// Use this parameter to specify the maximum number of items to return. When this
+	// value is present, KMS does not return more than the specified number of items,
+	// but it might return fewer. This value is optional. If you include a value, it
+	// must be between 1 and 1000, inclusive. If you do not include a value, it
+	// defaults to 100. Only one policy can be attached to a key.
+	Limit *int32
+
+	// Use this parameter in a subsequent request after you receive a response with
+	// truncated results. Set it to the value of NextMarker from the truncated response
+	// you just received.
+	Marker *string
+
+	noSmithyDocumentSerde
+}
+
+type ListKeyPoliciesOutput struct {
+
+	// When Truncated is true, this element is present and contains the value to use
+	// for the Marker parameter in a subsequent request.
+	NextMarker *string
+
+	// A list of key policy names. The only valid value is default.
+	PolicyNames []string
+
+	// A flag that indicates whether there are more items in the list. When this value
+	// is true, the list in this response is truncated. To get more items, pass the
+	// value of the NextMarker element in this response to the Marker parameter in a
+	// subsequent request.
+	Truncated bool
+
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListKeyPoliciesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListKeyPolicies{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListKeyPolicies{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListKeyPoliciesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListKeyPolicies(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListKeyPoliciesAPIClient is a client that implements the ListKeyPolicies +// operation. +type ListKeyPoliciesAPIClient interface { + ListKeyPolicies(context.Context, *ListKeyPoliciesInput, ...func(*Options)) (*ListKeyPoliciesOutput, error) +} + +var _ ListKeyPoliciesAPIClient = (*Client)(nil) + +// ListKeyPoliciesPaginatorOptions is the paginator options for ListKeyPolicies +type ListKeyPoliciesPaginatorOptions struct { + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. This value is optional. If you include a value, it + // must be between 1 and 1000, inclusive. If you do not include a value, it + // defaults to 100. Only one policy can be attached to a key. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListKeyPoliciesPaginator is a paginator for ListKeyPolicies +type ListKeyPoliciesPaginator struct { + options ListKeyPoliciesPaginatorOptions + client ListKeyPoliciesAPIClient + params *ListKeyPoliciesInput + nextToken *string + firstPage bool +} + +// NewListKeyPoliciesPaginator returns a new ListKeyPoliciesPaginator +func NewListKeyPoliciesPaginator(client ListKeyPoliciesAPIClient, params *ListKeyPoliciesInput, optFns ...func(*ListKeyPoliciesPaginatorOptions)) *ListKeyPoliciesPaginator { + if params == nil { + params = &ListKeyPoliciesInput{} + } + + options := ListKeyPoliciesPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListKeyPoliciesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.Marker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListKeyPoliciesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListKeyPolicies page. +func (p *ListKeyPoliciesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListKeyPoliciesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.Marker = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + result, err := p.client.ListKeyPolicies(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextMarker + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListKeyPolicies(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "ListKeyPolicies", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go new file mode 100644 index 00000000000..c554f536e11 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go @@ -0,0 +1,242 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets a list of all KMS keys in the caller's Amazon Web Services account and +// Region. Cross-account use: No. You cannot perform this operation on a KMS key in +// a different Amazon Web Services account. 
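+// A minimal pagination sketch using the ListKeysPaginator defined in this file
+// (assuming an initialized *kms.Client named client, a context.Context named
+// ctx, and the aws helper package):
+//
+//	pager := kms.NewListKeysPaginator(client, &kms.ListKeysInput{})
+//	for pager.HasMorePages() {
+//		page, err := pager.NextPage(ctx)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		for _, key := range page.Keys {
+//			fmt.Println(aws.ToString(key.KeyArn))
+//		}
+//	}
+//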
Required permissions: kms:ListKeys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (IAM policy) Related operations:
+//
+// * CreateKey
+//
+// * DescribeKey
+//
+// * ListAliases
+//
+// *
+// ListResourceTags
+func (c *Client) ListKeys(ctx context.Context, params *ListKeysInput, optFns ...func(*Options)) (*ListKeysOutput, error) {
+	if params == nil {
+		params = &ListKeysInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ListKeys", params, optFns, c.addOperationListKeysMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ListKeysOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ListKeysInput struct {
+
+	// Use this parameter to specify the maximum number of items to return. When this
+	// value is present, KMS does not return more than the specified number of items,
+	// but it might return fewer. This value is optional. If you include a value, it
+	// must be between 1 and 1000, inclusive. If you do not include a value, it
+	// defaults to 100.
+	Limit *int32
+
+	// Use this parameter in a subsequent request after you receive a response with
+	// truncated results. Set it to the value of NextMarker from the truncated response
+	// you just received.
+	Marker *string
+
+	noSmithyDocumentSerde
+}
+
+type ListKeysOutput struct {
+
+	// A list of KMS keys.
+	Keys []types.KeyListEntry
+
+	// When Truncated is true, this element is present and contains the value to use
+	// for the Marker parameter in a subsequent request.
+	NextMarker *string
+
+	// A flag that indicates whether there are more items in the list. When this value
+	// is true, the list in this response is truncated. To get more items, pass the
+	// value of the NextMarker element in this response to the Marker parameter in a
+	// subsequent request.
+	Truncated bool
+
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListKeysMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListKeys{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListKeys{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListKeys(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListKeysAPIClient is a client that implements the ListKeys operation. +type ListKeysAPIClient interface { + ListKeys(context.Context, *ListKeysInput, ...func(*Options)) (*ListKeysOutput, error) +} + +var _ ListKeysAPIClient = (*Client)(nil) + +// ListKeysPaginatorOptions is the paginator options for ListKeys +type ListKeysPaginatorOptions struct { + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. This value is optional. If you include a value, it + // must be between 1 and 1000, inclusive. If you do not include a value, it + // defaults to 100. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListKeysPaginator is a paginator for ListKeys +type ListKeysPaginator struct { + options ListKeysPaginatorOptions + client ListKeysAPIClient + params *ListKeysInput + nextToken *string + firstPage bool +} + +// NewListKeysPaginator returns a new ListKeysPaginator +func NewListKeysPaginator(client ListKeysAPIClient, params *ListKeysInput, optFns ...func(*ListKeysPaginatorOptions)) *ListKeysPaginator { + if params == nil { + params = &ListKeysInput{} + } + + options := ListKeysPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListKeysPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.Marker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListKeysPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListKeys page. +func (p *ListKeysPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListKeysOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.Marker = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + result, err := p.client.ListKeys(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextMarker + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListKeys(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "ListKeys", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go new file mode 100644 index 00000000000..02c74956079 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go @@ -0,0 +1,272 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns all tags on the specified KMS key. For general information about tags, +// including the format and syntax, see Tagging Amazon Web Services resources +// (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the Amazon +// Web Services General Reference. For information about using tags in KMS, see +// Tagging keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). +// Cross-account use: No. You cannot perform this operation on a KMS key in a +// different Amazon Web Services account. 
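+// A minimal pagination sketch using the ListResourceTagsPaginator defined in
+// this file (assuming client, ctx, and the aws helper package as above; the key
+// ID is a placeholder):
+//
+//	pager := kms.NewListResourceTagsPaginator(client, &kms.ListResourceTagsInput{
+//		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//	})
+//	for pager.HasMorePages() {
+//		page, err := pager.NextPage(ctx)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		for _, tag := range page.Tags {
+//			fmt.Printf("%s=%s\n", aws.ToString(tag.TagKey), aws.ToString(tag.TagValue))
+//		}
+//	}
+//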
Required permissions:
+// kms:ListResourceTags
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (key policy) Related operations:
+//
+// * CreateKey
+//
+// * ReplicateKey
+//
+// * TagResource
+//
+// *
+// UntagResource
+func (c *Client) ListResourceTags(ctx context.Context, params *ListResourceTagsInput, optFns ...func(*Options)) (*ListResourceTagsOutput, error) {
+	if params == nil {
+		params = &ListResourceTagsInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ListResourceTags", params, optFns, c.addOperationListResourceTagsMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ListResourceTagsOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ListResourceTagsInput struct {
+
+	// Gets tags on the specified KMS key. Specify the key ID or key ARN of the KMS
+	// key. For example:
+	//
+	// * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// * Key ARN:
+	// arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// To
+	// get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.
+	//
+	// This member is required.
+	KeyId *string
+
+	// Use this parameter to specify the maximum number of items to return. When this
+	// value is present, KMS does not return more than the specified number of items,
+	// but it might return fewer. This value is optional. If you include a value, it
+	// must be between 1 and 50, inclusive. If you do not include a value, it defaults
+	// to 50.
+	Limit *int32
+
+	// Use this parameter in a subsequent request after you receive a response with
+	// truncated results. Set it to the value of NextMarker from the truncated response
+	// you just received. Do not attempt to construct this value. Use only the value of
+	// NextMarker from the truncated response you just received.
+	Marker *string
+
+	noSmithyDocumentSerde
+}
+
+type ListResourceTagsOutput struct {
+
+	// When Truncated is true, this element is present and contains the value to use
+	// for the Marker parameter in a subsequent request. Do not assume or infer any
+	// information from this value.
+	NextMarker *string
+
+	// A list of tags. Each tag consists of a tag key and a tag value. Tagging or
+	// untagging a KMS key can allow or deny permission to the KMS key. For details,
+	// see ABAC for KMS
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key
+	// Management Service Developer Guide.
+	Tags []types.Tag
+
+	// A flag that indicates whether there are more items in the list. When this value
+	// is true, the list in this response is truncated. To get more items, pass the
+	// value of the NextMarker element in this response to the Marker parameter in a
+	// subsequent request.
+	Truncated bool
+
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListResourceTagsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListResourceTags{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListResourceTags{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListResourceTagsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListResourceTags(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListResourceTagsAPIClient is a client that implements the ListResourceTags +// operation. +type ListResourceTagsAPIClient interface { + ListResourceTags(context.Context, *ListResourceTagsInput, ...func(*Options)) (*ListResourceTagsOutput, error) +} + +var _ ListResourceTagsAPIClient = (*Client)(nil) + +// ListResourceTagsPaginatorOptions is the paginator options for ListResourceTags +type ListResourceTagsPaginatorOptions struct { + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. This value is optional. If you include a value, it + // must be between 1 and 50, inclusive. If you do not include a value, it defaults + // to 50. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListResourceTagsPaginator is a paginator for ListResourceTags +type ListResourceTagsPaginator struct { + options ListResourceTagsPaginatorOptions + client ListResourceTagsAPIClient + params *ListResourceTagsInput + nextToken *string + firstPage bool +} + +// NewListResourceTagsPaginator returns a new ListResourceTagsPaginator +func NewListResourceTagsPaginator(client ListResourceTagsAPIClient, params *ListResourceTagsInput, optFns ...func(*ListResourceTagsPaginatorOptions)) *ListResourceTagsPaginator { + if params == nil { + params = &ListResourceTagsInput{} + } + + options := ListResourceTagsPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListResourceTagsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.Marker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListResourceTagsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListResourceTags page. +func (p *ListResourceTagsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListResourceTagsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.Marker = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + result, err := p.client.ListResourceTags(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextMarker + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListResourceTags(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "ListResourceTags", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go new file mode 100644 index 00000000000..d15ead72082 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go @@ -0,0 +1,274 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns information about all grants in the Amazon Web Services account and +// Region that have the specified retiring principal. You can specify any principal +// in your Amazon Web Services account. The grants that are returned include grants +// for KMS keys in your Amazon Web Services account and other Amazon Web Services +// accounts. You might use this operation to determine which grants you may retire. +// To retire a grant, use the RetireGrant operation. 
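+// A minimal usage sketch (assuming client and ctx as above; the principal ARN
+// is a placeholder):
+//
+//	out, err := client.ListRetirableGrants(ctx, &kms.ListRetirableGrantsInput{
+//		RetiringPrincipal: aws.String("arn:aws:iam::111122223333:role/ExampleRole"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, grant := range out.Grants {
+//		fmt.Println(aws.ToString(grant.GrantId))
+//	}
+//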
For detailed information about
+// grants, including grant terminology, see Grants in KMS
+// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) in the Key
+// Management Service Developer Guide . For examples of working with grants in
+// several programming languages, see Programming grants
+// (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html).
+// Cross-account use: You must specify a principal in your Amazon Web Services
+// account. However, this operation can return grants in any Amazon Web Services
+// account. You do not need kms:ListRetirableGrants permission (or any other
+// additional permission) in any Amazon Web Services account other than your own.
+// Required permissions: kms:ListRetirableGrants
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (IAM policy) in your Amazon Web Services account. Related operations:
+//
+// *
+// CreateGrant
+//
+// * ListGrants
+//
+// * RetireGrant
+//
+// * RevokeGrant
+func (c *Client) ListRetirableGrants(ctx context.Context, params *ListRetirableGrantsInput, optFns ...func(*Options)) (*ListRetirableGrantsOutput, error) {
+	if params == nil {
+		params = &ListRetirableGrantsInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ListRetirableGrants", params, optFns, c.addOperationListRetirableGrantsMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ListRetirableGrantsOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ListRetirableGrantsInput struct {
+
+	// The retiring principal for which to list grants. Enter a principal in your
+	// Amazon Web Services account. To specify the retiring principal, use the Amazon
+	// Resource Name (ARN)
+	// (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of
+	// an Amazon Web Services principal. Valid Amazon Web Services principals include
+	// Amazon Web Services accounts (root), IAM users, federated users, and assumed
+	// role users. For examples of the ARN syntax for specifying a principal, see
+	// Amazon Web Services Identity and Access Management (IAM)
+	// (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam)
+	// in the Example ARNs section of the Amazon Web Services General Reference.
+	//
+	// This member is required.
+	RetiringPrincipal *string
+
+	// Use this parameter to specify the maximum number of items to return. When this
+	// value is present, KMS does not return more than the specified number of items,
+	// but it might return fewer. This value is optional. If you include a value, it
+	// must be between 1 and 100, inclusive. If you do not include a value, it defaults
+	// to 50.
+	Limit *int32
+
+	// Use this parameter in a subsequent request after you receive a response with
+	// truncated results. Set it to the value of NextMarker from the truncated response
+	// you just received.
+	Marker *string
+
+	noSmithyDocumentSerde
+}
+
+type ListRetirableGrantsOutput struct {
+
+	// A list of grants.
+	Grants []types.GrantListEntry
+
+	// When Truncated is true, this element is present and contains the value to use
+	// for the Marker parameter in a subsequent request.
+	NextMarker *string
+
+	// A flag that indicates whether there are more items in the list. When this value
+	// is true, the list in this response is truncated. To get more items, pass the
+	// value of the NextMarker element in this response to the Marker parameter in a
+	// subsequent request.
+ Truncated bool + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListRetirableGrantsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListRetirableGrants{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListRetirableGrants{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListRetirableGrantsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListRetirableGrants(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListRetirableGrantsAPIClient is a client that implements the ListRetirableGrants +// operation. +type ListRetirableGrantsAPIClient interface { + ListRetirableGrants(context.Context, *ListRetirableGrantsInput, ...func(*Options)) (*ListRetirableGrantsOutput, error) +} + +var _ ListRetirableGrantsAPIClient = (*Client)(nil) + +// ListRetirableGrantsPaginatorOptions is the paginator options for +// ListRetirableGrants +type ListRetirableGrantsPaginatorOptions struct { + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. This value is optional. If you include a value, it + // must be between 1 and 100, inclusive. If you do not include a value, it defaults + // to 50. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListRetirableGrantsPaginator is a paginator for ListRetirableGrants +type ListRetirableGrantsPaginator struct { + options ListRetirableGrantsPaginatorOptions + client ListRetirableGrantsAPIClient + params *ListRetirableGrantsInput + nextToken *string + firstPage bool +} + +// NewListRetirableGrantsPaginator returns a new ListRetirableGrantsPaginator +func NewListRetirableGrantsPaginator(client ListRetirableGrantsAPIClient, params *ListRetirableGrantsInput, optFns ...func(*ListRetirableGrantsPaginatorOptions)) *ListRetirableGrantsPaginator { + if params == nil { + params = &ListRetirableGrantsInput{} + } + + options := ListRetirableGrantsPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListRetirableGrantsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.Marker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListRetirableGrantsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListRetirableGrants page. +func (p *ListRetirableGrantsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListRetirableGrantsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.Marker = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + result, err := p.client.ListRetirableGrants(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextMarker + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListRetirableGrants(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "ListRetirableGrants", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go new file mode 100644 index 00000000000..9a785b2adf9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go @@ -0,0 +1,199 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Attaches a key policy to the specified KMS key. For more information about key +// policies, see Key Policies +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) in the +// Key Management Service Developer Guide. For help writing and formatting a JSON +// policy document, see the IAM JSON Policy Reference +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) in +// the Identity and Access Management User Guide . 
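+// A minimal usage sketch (assuming client and ctx as above; policyJSON is a
+// placeholder variable holding the key policy document as a JSON string):
+//
+//	_, err := client.PutKeyPolicy(ctx, &kms.PutKeyPolicyInput{
+//		KeyId:      aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//		PolicyName: aws.String("default"), // the only valid policy name
+//		Policy:     aws.String(policyJSON),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//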
For examples of adding a key +// policy in multiple programming languages, see Setting a key policy +// (https://docs.aws.amazon.com/kms/latest/developerguide/programming-key-policies.html#put-policy) +// in the Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account. +// Required permissions: kms:PutKeyPolicy +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: GetKeyPolicy +func (c *Client) PutKeyPolicy(ctx context.Context, params *PutKeyPolicyInput, optFns ...func(*Options)) (*PutKeyPolicyOutput, error) { + if params == nil { + params = &PutKeyPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutKeyPolicy", params, optFns, c.addOperationPutKeyPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutKeyPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutKeyPolicyInput struct { + + // Sets the key policy on the specified KMS key. Specify the key ID or key ARN of + // the KMS key. For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key + // ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // The key policy to attach to the KMS key. The key policy must meet the following + // criteria: + // + // * If you don't set BypassPolicyLockoutSafetyCheck to true, the key + // policy must allow the principal that is making the PutKeyPolicy request to make + // a subsequent PutKeyPolicy request on the KMS key. This reduces the risk that the + // KMS key becomes unmanageable. For more information, refer to the scenario in the + // Default Key Policy + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + // section of the Key Management Service Developer Guide. + // + // * Each statement in the + // key policy must contain one or more principals. The principals in the key policy + // must exist and be visible to KMS. When you create a new Amazon Web Services + // principal (for example, an IAM user or role), you might need to enforce a delay + // before including the new principal in a key policy because the new principal + // might not be immediately visible to KMS. For more information, see Changes that + // I make are not always immediately visible + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) + // in the Amazon Web Services Identity and Access Management User Guide. + // + // A key + // policy document can include only the following characters: + // + // * Printable ASCII + // characters from the space character (\u0020) through the end of the ASCII + // character range. + // + // * Printable characters in the Basic Latin and Latin-1 + // Supplement character set (through \u00FF). 
+	//
+	// * The tab (\u0009), line feed
+	// (\u000A), and carriage return (\u000D) special characters
+	//
+	// For information about
+	// key policies, see Key policies in KMS
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) in the
+	// Key Management Service Developer Guide. For help writing and formatting a JSON
+	// policy document, see the IAM JSON Policy Reference
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) in
+	// the Identity and Access Management User Guide .
+	//
+	// This member is required.
+	Policy *string
+
+	// The name of the key policy. The only valid value is default.
+	//
+	// This member is required.
+	PolicyName *string
+
+	// A flag to indicate whether to bypass the key policy lockout safety check.
+	// Setting this value to true increases the risk that the KMS key becomes
+	// unmanageable. Do not set this value to true indiscriminately. For more
+	// information, refer to the scenario in the Default Key Policy
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam)
+	// section in the Key Management Service Developer Guide. Use this parameter only
+	// when you intend to prevent the principal that is making the request from making
+	// a subsequent PutKeyPolicy request on the KMS key. The default value is false.
+	BypassPolicyLockoutSafetyCheck bool
+
+	noSmithyDocumentSerde
+}
+
+type PutKeyPolicyOutput struct {
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutKeyPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutKeyPolicy{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutKeyPolicy{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+		return err
+	}
+	if err = addRetryMiddlewares(stack, options); err != nil {
+		return err
+	}
+	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addOpPutKeyPolicyValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutKeyPolicy(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opPutKeyPolicy(region string)
*awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "kms",
+		OperationName: "PutKeyPolicy",
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go
new file mode 100644
index 00000000000..fe0f4e70aa9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go
@@ -0,0 +1,329 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package kms
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/aws-sdk-go-v2/service/kms/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Decrypts ciphertext and then reencrypts it entirely within KMS. You can use this
+// operation to change the KMS key under which data is encrypted, such as when you
+// manually rotate
+// (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotate-keys-manually)
+// a KMS key or change the KMS key that protects a ciphertext. You can also use it
+// to reencrypt ciphertext under the same KMS key, such as to change the encryption
+// context
+// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context)
+// of a ciphertext. The ReEncrypt operation can decrypt ciphertext that was
+// encrypted by using a KMS key in a KMS operation, such as Encrypt or
+// GenerateDataKey. It can also decrypt ciphertext that was encrypted by using the
+// public key of an asymmetric KMS key
+// (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#asymmetric-cmks)
+// outside of KMS. However, it cannot decrypt ciphertext produced by other
+// libraries, such as the Amazon Web Services Encryption SDK
+// (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/) or Amazon
+// S3 client-side encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html).
+// These libraries return a ciphertext format that is incompatible with KMS. When
+// you use the ReEncrypt operation, you need to provide information for the decrypt
+// operation and the subsequent encrypt operation.
+//
+// * If your ciphertext was
+// encrypted under an asymmetric KMS key, you must use the SourceKeyId parameter to
+// identify the KMS key that encrypted the ciphertext. You must also supply the
+// encryption algorithm that was used. This information is required to decrypt the
+// data.
+//
+// * If your ciphertext was encrypted under a symmetric encryption KMS key,
+// the SourceKeyId parameter is optional. KMS can get this information from
+// metadata that it adds to the symmetric ciphertext blob. This feature adds
+// durability to your implementation by ensuring that authorized users can decrypt
+// ciphertext decades after it was encrypted, even if they've lost track of the key
+// ID. However, specifying the source KMS key is always recommended as a best
+// practice. When you use the SourceKeyId parameter to specify a KMS key, KMS uses
+// only the KMS key you specify. If the ciphertext was encrypted under a different
+// KMS key, the ReEncrypt operation fails. This practice ensures that you use the
+// KMS key that you intend.
+// +// * To reencrypt the data, you must use the +// DestinationKeyId parameter to specify the KMS key that re-encrypts the data +// after it is decrypted. If the destination KMS key is an asymmetric KMS key, you +// must also provide the encryption algorithm. The algorithm that you choose must +// be compatible with the KMS key. When you use an asymmetric KMS key to encrypt or +// reencrypt data, be sure to record the KMS key and encryption algorithm that you +// choose. You will be required to provide the same KMS key and encryption +// algorithm when you decrypt the data. If the KMS key and algorithm do not match +// the values used to encrypt the data, the decrypt operation fails. You are not +// required to supply the key ID and encryption algorithm when you decrypt with +// symmetric encryption KMS keys because KMS stores this information in the +// ciphertext blob. KMS cannot store metadata in ciphertext generated with +// asymmetric keys. The standard format for asymmetric key ciphertext does not +// include configurable fields. +// +// The KMS key that you use for this operation must +// be in a compatible key state. For details, see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: Yes. The source KMS +// key and destination KMS key can be in different Amazon Web Services accounts. +// Either or both KMS keys can be in a different account than the caller. To +// specify a KMS key in a different account, you must use its key ARN or alias ARN. +// Required permissions: +// +// * kms:ReEncryptFrom +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// permission on the source KMS key (key policy) +// +// * kms:ReEncryptTo +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// permission on the destination KMS key (key policy) +// +// To permit reencryption from +// or to a KMS key, include the "kms:ReEncrypt*" permission in your key policy +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html). This +// permission is automatically included in the key policy when you use the console +// to create a KMS key. But you must include it manually when you create a KMS key +// programmatically or when you use the PutKeyPolicy operation to set a key policy. +// Related operations: +// +// * Decrypt +// +// * Encrypt +// +// * GenerateDataKey +// +// * +// GenerateDataKeyPair +func (c *Client) ReEncrypt(ctx context.Context, params *ReEncryptInput, optFns ...func(*Options)) (*ReEncryptOutput, error) { + if params == nil { + params = &ReEncryptInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ReEncrypt", params, optFns, c.addOperationReEncryptMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ReEncryptOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ReEncryptInput struct { + + // Ciphertext of the data to reencrypt. + // + // This member is required. + CiphertextBlob []byte + + // A unique identifier for the KMS key that is used to reencrypt the data. Specify + // a symmetric encryption KMS key or an asymmetric KMS key with a KeyUsage value of + // ENCRYPT_DECRYPT. To find the KeyUsage value of a KMS key, use the DescribeKey + // operation. To specify a KMS key, use its key ID, key ARN, alias name, or alias + // ARN. When using an alias name, prefix it with "alias/". 
To specify a KMS key in + // a different Amazon Web Services account, you must use the key ARN or alias ARN. + // For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Alias name: alias/ExampleAlias + // + // * Alias ARN: + // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key + // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias + // ARN, use ListAliases. + // + // This member is required. + DestinationKeyId *string + + // Specifies the encryption algorithm that KMS will use to reencrypt the data after + // it has decrypted it. The default value, SYMMETRIC_DEFAULT, represents the + // encryption algorithm used for symmetric encryption KMS keys. This parameter is + // required only when the destination KMS key is an asymmetric KMS key. + DestinationEncryptionAlgorithm types.EncryptionAlgorithmSpec + + // Specifies the encryption context to use when reencrypting the data. A + // destination encryption context is valid only when the destination KMS key is a + // symmetric encryption KMS key. The standard ciphertext format for asymmetric KMS + // keys does not include fields for metadata. An encryption context is a collection + // of non-secret key-value pairs that represent additional authenticated data. When + // you use an encryption context to encrypt data, you must specify the same (an + // exact case-sensitive match) encryption context to decrypt the data. An + // encryption context is supported only on operations with symmetric encryption KMS + // keys. On operations with symmetric encryption KMS keys, an encryption context is + // optional, but it is strongly recommended. For more information, see Encryption + // context + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // in the Key Management Service Developer Guide. + DestinationEncryptionContext map[string]string + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. + // For more information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantTokens []string + + // Specifies the encryption algorithm that KMS will use to decrypt the ciphertext + // before it is reencrypted. The default value, SYMMETRIC_DEFAULT, represents the + // algorithm used for symmetric encryption KMS keys. Specify the same algorithm + // that was used to encrypt the ciphertext. If you specify a different algorithm, + // the decrypt attempt fails. This parameter is required only when the ciphertext + // was encrypted under an asymmetric KMS key. + SourceEncryptionAlgorithm types.EncryptionAlgorithmSpec + + // Specifies the encryption context to use to decrypt the ciphertext. Enter the + // same encryption context that was used to encrypt the ciphertext. An encryption + // context is a collection of non-secret key-value pairs that represent additional + // authenticated data. When you use an encryption context to encrypt data, you must + // specify the same (an exact case-sensitive match) encryption context to decrypt + // the data.
An encryption context is supported only on operations with symmetric + // encryption KMS keys. On operations with symmetric encryption KMS keys, an + // encryption context is optional, but it is strongly recommended. For more + // information, see Encryption context + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // in the Key Management Service Developer Guide. + SourceEncryptionContext map[string]string + + // Specifies the KMS key that KMS will use to decrypt the ciphertext before it is + // reencrypted. Enter a key ID of the KMS key that was used to encrypt the + // ciphertext. If you identify a different KMS key, the ReEncrypt operation throws + // an IncorrectKeyException. This parameter is required only when the ciphertext + // was encrypted under an asymmetric KMS key. If you used a symmetric encryption + // KMS key, KMS can get the KMS key from metadata that it adds to the symmetric + // ciphertext blob. However, specifying the source KMS key is always recommended as + // a best practice. This practice ensures that you use the KMS key that you intend. + // To specify a KMS key, + // use its key ID, key ARN, alias name, or alias ARN. When using an alias name, + // prefix it with "alias/". To specify a KMS key in a different Amazon Web Services + // account, you must use the key ARN or alias ARN. For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Alias name: alias/ExampleAlias + // + // * Alias ARN: + // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key + // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias + // ARN, use ListAliases. + SourceKeyId *string + + noSmithyDocumentSerde +} + +type ReEncryptOutput struct { + + // The reencrypted data. When you use the HTTP API or the Amazon Web Services CLI, + // the value is Base64-encoded. Otherwise, it is not Base64-encoded. + CiphertextBlob []byte + + // The encryption algorithm that was used to reencrypt the data. + DestinationEncryptionAlgorithm types.EncryptionAlgorithmSpec + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the KMS key that was used to reencrypt the data. + KeyId *string + + // The encryption algorithm that was used to decrypt the ciphertext before it was + // reencrypted. + SourceEncryptionAlgorithm types.EncryptionAlgorithmSpec + + // Unique identifier of the KMS key used to originally encrypt the data. + SourceKeyId *string + + // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationReEncryptMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpReEncrypt{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpReEncrypt{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpReEncryptValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opReEncrypt(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opReEncrypt(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "ReEncrypt", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go new file mode 100644 index 00000000000..b84259f998c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go @@ -0,0 +1,337 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Replicates a multi-Region key into the specified Region. This operation creates +// a multi-Region replica key based on a multi-Region primary key in a different +// Region of the same Amazon Web Services partition. You can create multiple +// replicas of a primary key, but each must be in a different Region. To create a +// multi-Region primary key, use the CreateKey operation. This operation supports +// multi-Region keys, a KMS feature that lets you create multiple interoperable +// KMS keys in different Amazon Web Services Regions.
Because these KMS keys have +// the same key ID, key material, and other metadata, you can use them +// interchangeably to encrypt data in one Amazon Web Services Region and decrypt it +// in a different Amazon Web Services Region without re-encrypting the data or +// making a cross-Region call. For more information about multi-Region keys, see +// Multi-Region keys in KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) +// in the Key Management Service Developer Guide. A replica key is a +// fully functional KMS key that can be used independently of its primary and peer +// replica keys. A primary key and its replica keys share properties that make them +// interoperable. They have the same key ID +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-id) +// and key material. They also have the same key spec +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-spec), +// key usage +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-usage), +// key material origin +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-origin), +// and automatic key rotation status +// (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html). KMS +// automatically synchronizes these shared properties among related multi-Region +// keys. All other properties of a replica key can differ, including its key policy +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html), tags +// (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html), +// aliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html), +// and Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html). KMS +// pricing and quotas for KMS keys apply to each primary key and replica key. When +// this operation completes, the new replica key has a transient key state of +// Creating. This key state changes to Enabled (or PendingImport) after a few +// seconds when the process of creating the new replica key is complete. While the +// key state is Creating, you can manage the key, but you cannot yet use it in +// cryptographic operations. If you are creating and using the replica key +// programmatically, retry on KMSInvalidStateException or call DescribeKey to check +// its KeyState value before using it. For details about the Creating key state, +// see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. You cannot create more than one replica +// of a primary key in any Region. If the Region already includes a replica of the +// key you're trying to replicate, ReplicateKey returns an AlreadyExistsException +// error. If the key state of the existing replica is PendingDeletion, you can +// cancel the scheduled key deletion (CancelKeyDeletion) or wait for the key to be +// deleted. The new replica key you create will have the same shared properties +// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html#mrk-sync-properties) +// as the original replica key. The CloudTrail log of a ReplicateKey operation +// records a ReplicateKey operation in the primary key's Region and a CreateKey +// operation in the replica key's Region. If you replicate a multi-Region primary +// key with imported key material, the replica key is created with no key material.
+// You must import the same key material that you imported into the primary key. +// For details, see Importing key material into multi-Region keys in the Key +// Management Service Developer Guide. To convert a replica key to a primary key, +// use the UpdatePrimaryRegion operation. ReplicateKey uses different default +// values for the KeyPolicy and Tags parameters than those used in the KMS console. +// For details, see the parameter descriptions. Cross-account use: No. You cannot +// use this operation to create a replica key in a different Amazon Web Services +// account. Required permissions: +// +// * kms:ReplicateKey on the primary key (in the +// primary key's Region). Include this permission in the primary key's key +// policy. +// +// * kms:CreateKey in an IAM policy in the replica Region. +// +// * To use the +// Tags parameter, kms:TagResource in an IAM policy in the replica Region. +// +// Related +// operations +// +// * CreateKey +// +// * UpdatePrimaryRegion +func (c *Client) ReplicateKey(ctx context.Context, params *ReplicateKeyInput, optFns ...func(*Options)) (*ReplicateKeyOutput, error) { + if params == nil { + params = &ReplicateKeyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ReplicateKey", params, optFns, c.addOperationReplicateKeyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ReplicateKeyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ReplicateKeyInput struct { + + // Identifies the multi-Region primary key that is being replicated. To determine + // whether a KMS key is a multi-Region primary key, use the DescribeKey operation + // to check the value of the MultiRegionKeyType property. Specify the key ID or key + // ARN of a multi-Region primary key. For example: + // + // * Key ID: + // mrk-1234abcd12ab34cd56ef1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // The Region ID of the Amazon Web Services Region for this replica key. Enter the + // Region ID, such as us-east-1 or ap-southeast-2. For a list of Amazon Web + // Services Regions in which KMS is supported, see KMS service endpoints + // (https://docs.aws.amazon.com/general/latest/gr/kms.html#kms_region) in the + // Amazon Web Services General Reference. HMAC KMS keys are not supported in all + // Amazon Web Services Regions. If you try to replicate an HMAC KMS key in an + // Amazon Web Services Region in which HMAC keys are not supported, the + // ReplicateKey operation returns an UnsupportedOperationException. For a list of + // Regions in which HMAC KMS keys are supported, see HMAC keys in KMS + // (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) in the Key + // Management Service Developer Guide. The replica must be in a different Amazon + // Web Services Region than its primary key and other replicas of that primary key, + // but in the same Amazon Web Services partition. KMS must be available in the + // replica Region. If the Region is not enabled by default, the Amazon Web Services + // account must be enabled in the Region. For information about Amazon Web Services + // partitions, see Amazon Resource Names (ARNs) + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in + // the Amazon Web Services General Reference. 
For information about enabling and + // disabling Regions, see Enabling a Region + // (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html#rande-manage-enable) + // and Disabling a Region + // (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html#rande-manage-disable) + // in the Amazon Web Services General Reference. + // + // This member is required. + ReplicaRegion *string + + // A flag to indicate whether to bypass the key policy lockout safety check. + // Setting this value to true increases the risk that the KMS key becomes + // unmanageable. Do not set this value to true indiscriminately. For more + // information, refer to the scenario in the Default Key Policy + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + // section in the Key Management Service Developer Guide. Use this parameter only + // when you intend to prevent the principal that is making the request from making + // a subsequent PutKeyPolicy request on the KMS key. The default value is false. + BypassPolicyLockoutSafetyCheck bool + + // A description of the KMS key. The default value is an empty string (no + // description). The description is not a shared property of multi-Region keys. You + // can specify the same description or a different description for each key in a + // set of related multi-Region keys. KMS does not synchronize this property. + Description *string + + // The key policy to attach to the KMS key. This parameter is optional. If you do + // not provide a key policy, KMS attaches the default key policy + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) + // to the KMS key. The key policy is not a shared property of multi-Region keys. + // You can specify the same key policy or a different key policy for each key in a + // set of related multi-Region keys. KMS does not synchronize this property. If you + // provide a key policy, it must meet the following criteria: + // + // * If you don't set + // BypassPolicyLockoutSafetyCheck to true, the key policy must give the caller + // kms:PutKeyPolicy permission on the replica key. This reduces the risk that the + // KMS key becomes unmanageable. For more information, refer to the scenario in the + // Default Key Policy + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + // section of the Key Management Service Developer Guide . + // + // * Each statement in the + // key policy must contain one or more principals. The principals in the key policy + // must exist and be visible to KMS. When you create a new Amazon Web Services + // principal (for example, an IAM user or role), you might need to enforce a delay + // before including the new principal in a key policy because the new principal + // might not be immediately visible to KMS. For more information, see Changes that + // I make are not always immediately visible + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) + // in the Identity and Access Management User Guide . + // + // A key policy document can + // include only the following characters: + // + // * Printable ASCII characters from the + // space character (\u0020) through the end of the ASCII character range. + // + // * + // Printable characters in the Basic Latin and Latin-1 Supplement character set + // (through \u00FF). 
+ // + // * The tab (\u0009), line feed (\u000A), and carriage return + // (\u000D) special characters + // + // For information about key policies, see Key + // policies in KMS + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) in the + // Key Management Service Developer Guide. For help writing and formatting a JSON + // policy document, see the IAM JSON Policy Reference + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) in + // the Identity and Access Management User Guide . + Policy *string + + // Assigns one or more tags to the replica key. Use this parameter to tag the KMS + // key when it is created. To tag an existing KMS key, use the TagResource + // operation. Tagging or untagging a KMS key can allow or deny permission to the + // KMS key. For details, see ABAC for KMS + // (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key + // Management Service Developer Guide. To use this parameter, you must have + // kms:TagResource + // (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) + // permission in an IAM policy. Tags are not a shared property of multi-Region + // keys. You can specify the same tags or different tags for each key in a set of + // related multi-Region keys. KMS does not synchronize this property. Each tag + // consists of a tag key and a tag value. Both the tag key and the tag value are + // required, but the tag value can be an empty (null) string. You cannot have more + // than one tag on a KMS key with the same tag key. If you specify an existing tag + // key with a different tag value, KMS replaces the current tag value with the + // specified one. When you add tags to an Amazon Web Services resource, Amazon Web + // Services generates a cost allocation report with usage and costs aggregated by + // tags. Tags can also be used to control access to a KMS key. For details, see + // Tagging Keys + // (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). + Tags []types.Tag + + noSmithyDocumentSerde +} + +type ReplicateKeyOutput struct { + + // Displays details about the new replica key, including its Amazon Resource Name + // (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // and Key states of KMS keys + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html). It also + // includes the ARN and Amazon Web Services Region of its primary key and other + // replica keys. + ReplicaKeyMetadata *types.KeyMetadata + + // The key policy of the new replica key. The value is a key policy document in + // JSON format. + ReplicaPolicy *string + + // The tags on the new replica key. The value is a list of tag key and tag value + // pairs. + ReplicaTags []types.Tag + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationReplicateKeyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpReplicateKey{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpReplicateKey{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpReplicateKeyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opReplicateKey(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opReplicateKey(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "ReplicateKey", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go new file mode 100644 index 00000000000..adb4876b924 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go @@ -0,0 +1,159 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a grant. Typically, you retire a grant when you no longer need its +// permissions. To identify the grant to retire, use a grant token +// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token), +// or both the grant ID and a key identifier (key ID or key ARN) of the KMS key. +// The CreateGrant operation returns both values. This operation can be called by +// the retiring principal for a grant, by the grantee principal if the grant allows +// the RetireGrant operation, and by the Amazon Web Services account in which the +// grant is created. 
It can also be called by principals to whom permission for +// retiring a grant is delegated. For details, see Retiring and revoking grants +// (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete) +// in the Key Management Service Developer Guide. For detailed information about +// grants, including grant terminology, see Grants in KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) in the Key +// Management Service Developer Guide . For examples of working with grants in +// several programming languages, see Programming grants +// (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html). +// Cross-account use: Yes. You can retire a grant on a KMS key in a different +// Amazon Web Services account. Required permissions: Permission to retire a grant +// is determined primarily by the grant. For details, see Retiring and revoking +// grants +// (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete) +// in the Key Management Service Developer Guide. Related operations: +// +// * +// CreateGrant +// +// * ListGrants +// +// * ListRetirableGrants +// +// * RevokeGrant +func (c *Client) RetireGrant(ctx context.Context, params *RetireGrantInput, optFns ...func(*Options)) (*RetireGrantOutput, error) { + if params == nil { + params = &RetireGrantInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "RetireGrant", params, optFns, c.addOperationRetireGrantMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*RetireGrantOutput) + out.ResultMetadata = metadata + return out, nil +} + +type RetireGrantInput struct { + + // Identifies the grant to retire. To get the grant ID, use CreateGrant, + // ListGrants, or ListRetirableGrants. + // + // * Grant ID Example - + // 0123456789012345678901234567890123456789012345678901234567890123 + GrantId *string + + // Identifies the grant to be retired. You can use a grant token to identify a new + // grant even before it has achieved eventual consistency. Only the CreateGrant + // operation returns a grant token. For details, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Eventual consistency + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-eventual-consistency) + // in the Key Management Service Developer Guide. + GrantToken *string + + // The key ARN of the KMS key associated with the grant. To find the key ARN, use + // the ListKeys operation. For example: + // arn:aws:kms:us-east-2:444455556666:key/1234abcd-12ab-34cd-56ef-1234567890ab + KeyId *string + + noSmithyDocumentSerde +} + +type RetireGrantOutput struct { + // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRetireGrantMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpRetireGrant{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpRetireGrant{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRetireGrant(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opRetireGrant(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "RetireGrant", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go new file mode 100644 index 00000000000..75676053d4c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go @@ -0,0 +1,161 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the specified grant. You revoke a grant to terminate the permissions +// that the grant allows. For more information, see Retiring and revoking grants +// (https://docs.aws.amazon.com/kms/latest/developerguide/managing-grants.html#grant-delete) +// in the Key Management Service Developer Guide . When you create, retire, or +// revoke a grant, there might be a brief delay, usually less than five minutes, +// until the grant is available throughout KMS. This state is known as eventual +// consistency. For details, see Eventual consistency +// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-eventual-consistency) +// in the Key Management Service Developer Guide . 
For detailed information about +// grants, including grant terminology, see Grants in KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) in the Key +// Management Service Developer Guide . For examples of working with grants in +// several programming languages, see Programming grants +// (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html). +// Cross-account use: Yes. To perform this operation on a KMS key in a different +// Amazon Web Services account, specify the key ARN in the value of the KeyId +// parameter. Required permissions: kms:RevokeGrant +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy). Related operations: +// +// * CreateGrant +// +// * ListGrants +// +// * +// ListRetirableGrants +// +// * RetireGrant +func (c *Client) RevokeGrant(ctx context.Context, params *RevokeGrantInput, optFns ...func(*Options)) (*RevokeGrantOutput, error) { + if params == nil { + params = &RevokeGrantInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "RevokeGrant", params, optFns, c.addOperationRevokeGrantMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*RevokeGrantOutput) + out.ResultMetadata = metadata + return out, nil +} + +type RevokeGrantInput struct { + + // Identifies the grant to revoke. To get the grant ID, use CreateGrant, + // ListGrants, or ListRetirableGrants. + // + // This member is required. + GrantId *string + + // A unique identifier for the KMS key associated with the grant. To get the key ID + // and key ARN for a KMS key, use ListKeys or DescribeKey. Specify the key ID or + // key ARN of the KMS key. To specify a KMS key in a different Amazon Web Services + // account, you must use the key ARN. For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type RevokeGrantOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRevokeGrantMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpRevokeGrant{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpRevokeGrant{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpRevokeGrantValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRevokeGrant(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opRevokeGrant(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "RevokeGrant", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go new file mode 100644 index 00000000000..5d8b2891289 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go @@ -0,0 +1,207 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Schedules the deletion of a KMS key. By default, KMS applies a waiting period of +// 30 days, but you can specify a waiting period of 7-30 days. When this operation +// is successful, the key state of the KMS key changes to PendingDeletion and the +// key can't be used in any cryptographic operations. It remains in this state for +// the duration of the waiting period. Before the waiting period ends, you can use +// CancelKeyDeletion to cancel the deletion of the KMS key. 
After the waiting +// period ends, KMS deletes the KMS key, its key material, and all KMS data +// associated with it, including all aliases that refer to it. Deleting a KMS key +// is a destructive and potentially dangerous operation. When a KMS key is deleted, +// all data that was encrypted under the KMS key is unrecoverable. (The only +// exception is a multi-Region replica key.) To prevent the use of a KMS key +// without deleting it, use DisableKey. You can schedule the deletion of a +// multi-Region primary key and its replica keys at any time. However, KMS will not +// delete a multi-Region primary key with existing replica keys. If you schedule +// the deletion of a primary key with replicas, its key state changes to +// PendingReplicaDeletion and it cannot be replicated or used in cryptographic +// operations. This status can continue indefinitely. When the last of its replica +// keys is deleted (not just scheduled), the key state of the primary key changes +// to PendingDeletion and its waiting period (PendingWindowInDays) begins. For +// details, see Deleting multi-Region keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-delete.html) +// in the Key Management Service Developer Guide. When KMS deletes a KMS key from +// a CloudHSM key store +// (https://docs.aws.amazon.com/kms/latest/developerguide/delete-cmk-keystore.html), +// it makes a best effort to delete the associated key material from the associated +// CloudHSM cluster. However, you might need to manually delete the orphaned key +// material +// (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key) +// from the cluster and its backups. Deleting a KMS key from an external key store +// (https://docs.aws.amazon.com/kms/latest/developerguide/delete-xks-key.html) has +// no effect on the associated external key. However, for both types of custom key +// stores, deleting a KMS key is destructive and irreversible. You cannot decrypt +// ciphertext encrypted under the KMS key by using only its associated external key +// or CloudHSM key. Also, you cannot recreate a KMS key in an external key store by +// creating a new KMS key with the same key material. For more information about +// scheduling a KMS key for deletion, see Deleting KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html) in +// the Key Management Service Developer Guide. The KMS key that you use for this +// operation must be in a compatible key state. For details, see Key states of KMS +// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in +// the Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account. +// Required permissions: kms:ScheduleKeyDeletion (key policy) Related operations +// +// * +// CancelKeyDeletion +// +// * DisableKey +func (c *Client) ScheduleKeyDeletion(ctx context.Context, params *ScheduleKeyDeletionInput, optFns ...func(*Options)) (*ScheduleKeyDeletionOutput, error) { + if params == nil { + params = &ScheduleKeyDeletionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ScheduleKeyDeletion", params, optFns, c.addOperationScheduleKeyDeletionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ScheduleKeyDeletionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ScheduleKeyDeletionInput struct { + + // The unique identifier of the KMS key to delete.
Specify the key ID or key ARN of + // the KMS key. For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key + // ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // The waiting period, specified in number of days. After the waiting period ends, + // KMS deletes the KMS key. If the KMS key is a multi-Region primary key with + // replica keys, the waiting period begins when the last of its replica keys is + // deleted. Otherwise, the waiting period begins immediately. This value is + // optional. If you include a value, it must be between 7 and 30, inclusive. If you + // do not include a value, it defaults to 30. + PendingWindowInDays *int32 + + noSmithyDocumentSerde +} + +type ScheduleKeyDeletionOutput struct { + + // The date and time after which KMS deletes the KMS key. If the KMS key is a + // multi-Region primary key with replica keys, this field does not appear. The + // deletion date for the primary key isn't known until its last replica key is + // deleted. + DeletionDate *time.Time + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the KMS key whose deletion is scheduled. + KeyId *string + + // The current status of the KMS key. For more information about how key state + // affects the use of a KMS key, see Key states of KMS keys + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the + // Key Management Service Developer Guide. + KeyState types.KeyState + + // The waiting period before the KMS key is deleted. If the KMS key is a + // multi-Region primary key with replicas, the waiting period begins when the last + // of its replica keys is deleted. Otherwise, the waiting period begins + // immediately. + PendingWindowInDays *int32 + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationScheduleKeyDeletionMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpScheduleKeyDeletion{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpScheduleKeyDeletion{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpScheduleKeyDeletionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opScheduleKeyDeletion(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opScheduleKeyDeletion(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "ScheduleKeyDeletion", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go new file mode 100644 index 00000000000..4c66cbd9a45 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go @@ -0,0 +1,238 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a digital signature (https://en.wikipedia.org/wiki/Digital_signature) +// for a message or message digest by using the private key in an asymmetric +// signing KMS key. To verify the signature, use the Verify operation, or use the +// public key in the same asymmetric KMS key outside of KMS. For information about +// asymmetric KMS keys, see Asymmetric KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) +// in the Key Management Service Developer Guide. 
Digital signatures are generated +// and verified by using an asymmetric key pair, such as an RSA or ECC pair that is +// represented by an asymmetric KMS key. The key owner (or an authorized user) uses +// their private key to sign a message. Anyone with the public key can verify that +// the message was signed with that particular private key and that the message +// hasn't changed since it was signed. To use the Sign operation, provide the +// following information: +// +// * Use the KeyId parameter to identify an asymmetric KMS +// key with a KeyUsage value of SIGN_VERIFY. To get the KeyUsage value of a KMS +// key, use the DescribeKey operation. The caller must have kms:Sign permission on +// the KMS key. +// +// * Use the Message parameter to specify the message or message +// digest to sign. You can submit messages of up to 4096 bytes. To sign a larger +// message, generate a hash digest of the message, and then provide the hash digest +// in the Message parameter. To indicate whether the message is a full message or a +// digest, use the MessageType parameter. +// +// * Choose a signing algorithm that is +// compatible with the KMS key. +// +// When signing a message, be sure to record the KMS +// key and the signing algorithm. This information is required to verify the +// signature. Best practices recommend that you limit the time during which any +// signature is effective. This deters an attack where the actor uses a signed +// message to establish validity repeatedly or long after the message is +// superseded. Signatures do not include a timestamp, but you can include a +// timestamp in the signed message to help you detect when it's time to refresh the +// signature. To verify the signature that this operation generates, use the Verify +// operation. Or use the GetPublicKey operation to download the public key and then +// use the public key to verify the signature outside of KMS. The KMS key that you +// use for this operation must be in a compatible key state. For details, see Key +// states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: Yes. To perform this +// operation with a KMS key in a different Amazon Web Services account, specify the +// key ARN or alias ARN in the value of the KeyId parameter. Required permissions: +// kms:Sign +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: Verify +func (c *Client) Sign(ctx context.Context, params *SignInput, optFns ...func(*Options)) (*SignOutput, error) { + if params == nil { + params = &SignInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "Sign", params, optFns, c.addOperationSignMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*SignOutput) + out.ResultMetadata = metadata + return out, nil +} + +type SignInput struct { + + // Identifies an asymmetric KMS key. KMS uses the private key in the asymmetric KMS + // key to sign the message. The KeyUsage type of the KMS key must be SIGN_VERIFY. + // To find the KeyUsage of a KMS key, use the DescribeKey operation. To specify a + // KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias + // name, prefix it with "alias/". To specify a KMS key in a different Amazon Web + // Services account, you must use the key ARN or alias ARN.
For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Alias name: alias/ExampleAlias + // + // * Alias ARN: + // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key + // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias + // ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // Specifies the message or message digest to sign. Messages can be 0-4096 bytes. + // To sign a larger message, provide the message digest. If you provide a message, + // KMS generates a hash digest of the message and then signs it. + // + // This member is required. + Message []byte + + // Specifies the signing algorithm to use when signing the message. Choose an + // algorithm that is compatible with the type and size of the specified asymmetric + // KMS key. + // + // This member is required. + SigningAlgorithm types.SigningAlgorithmSpec + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. + // For more information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantTokens []string + + // Tells KMS whether the value of the Message parameter is a message or message + // digest. The default value, RAW, indicates a message. To indicate a message + // digest, enter DIGEST. + MessageType types.MessageType + + noSmithyDocumentSerde +} + +type SignOutput struct { + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the asymmetric KMS key that was used to sign the message. + KeyId *string + + // The cryptographic signature that was generated for the message. + // + // * When used + // with the supported RSA signing algorithms, the encoding of this value is defined + // by PKCS #1 in RFC 8017 (https://tools.ietf.org/html/rfc8017). + // + // * When used with + // the ECDSA_SHA_256, ECDSA_SHA_384, or ECDSA_SHA_512 signing algorithms, this + // value is a DER-encoded object as defined by ANS X9.62–2005 and RFC 3279 Section + // 2.2.3 (https://tools.ietf.org/html/rfc3279#section-2.2.3). This is the most + // commonly used signature format and is appropriate for most uses. + // + // When you use + // the HTTP API or the Amazon Web Services CLI, the value is Base64-encoded. + // Otherwise, it is not Base64-encoded. + Signature []byte + + // The signing algorithm that was used to sign the message. + SigningAlgorithm types.SigningAlgorithmSpec + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationSignMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpSign{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpSign{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpSignValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSign(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opSign(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "Sign", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go new file mode 100644 index 00000000000..c7358175cbb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go @@ -0,0 +1,176 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Adds or edits tags on a customer managed key +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk). +// Tagging or untagging a KMS key can allow or deny permission to the KMS key. For +// details, see ABAC for KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key +// Management Service Developer Guide. Each tag consists of a tag key and a tag +// value, both of which are case-sensitive strings. The tag value can be an empty +// (null) string. To add a tag, specify a new tag key and a tag value. To edit a +// tag, specify an existing tag key and a new tag value. 
You can use this operation +// to tag a customer managed key +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk), +// but you cannot tag an Amazon Web Services managed key +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk), +// an Amazon Web Services owned key +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk), +// a custom key store +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#keystore-concept), +// or an alias +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#alias-concept). +// You can also add tags to a KMS key while creating it (CreateKey) or replicating +// it (ReplicateKey). For information about using tags in KMS, see Tagging keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). For +// general information about tags, including the format and syntax, see Tagging +// Amazon Web Services resources +// (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the Amazon +// Web Services General Reference. The KMS key that you use for this operation must +// be in a compatible key state. For details, see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account. +// Required permissions: kms:TagResource +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations +// +// * CreateKey +// +// * ListResourceTags +// +// * +// ReplicateKey +// +// * UntagResource +func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { + if params == nil { + params = &TagResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "TagResource", params, optFns, c.addOperationTagResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*TagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type TagResourceInput struct { + + // Identifies a customer managed key in the account and Region. Specify the key ID + // or key ARN of the KMS key. For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // One or more tags. Each tag consists of a tag key and a tag value. The tag value + // can be an empty (null) string. You cannot have more than one tag on a KMS key + // with the same tag key. If you specify an existing tag key with a different tag + // value, KMS replaces the current tag value with the specified one. + // + // This member is required. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type TagResourceOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpTagResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpTagResource{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpTagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTagResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opTagResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "TagResource", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go new file mode 100644 index 00000000000..121e4abdee1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go @@ -0,0 +1,161 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes tags from a customer managed key +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk). +// To delete a tag, specify the tag key and the KMS key. Tagging or untagging a KMS +// key can allow or deny permission to the KMS key. For details, see ABAC for KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key +// Management Service Developer Guide. When it succeeds, the UntagResource +// operation doesn't return any output. Also, if the specified tag key isn't found +// on the KMS key, it doesn't throw an exception or return a response. 
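+//
+// For illustration, a minimal sketch of a call (the client, key ID, and tag key
+// below are placeholder values):
+//
+//	_, err := client.UntagResource(ctx, &UntagResourceInput{
+//		KeyId:   aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//		TagKeys: []string{"Purpose"},
+//	})
+//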
To confirm +// that the operation worked, use the ListResourceTags operation. For information +// about using tags in KMS, see Tagging keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). For +// general information about tags, including the format and syntax, see Tagging +// Amazon Web Services resources +// (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the Amazon +// Web Services General Reference. The KMS key that you use for this operation must +// be in a compatible key state. For details, see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account. +// Required permissions: kms:UntagResource +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations +// +// * CreateKey +// +// * ListResourceTags +// +// * +// ReplicateKey +// +// * TagResource +func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) { + if params == nil { + params = &UntagResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UntagResource", params, optFns, c.addOperationUntagResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UntagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UntagResourceInput struct { + + // Identifies the KMS key from which you are removing tags. Specify the key ID or + // key ARN of the KMS key. For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // One or more tag keys. Specify only the tag keys, not the tag values. + // + // This member is required. + TagKeys []string + + noSmithyDocumentSerde +} + +type UntagResourceOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUntagResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUntagResource{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUntagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUntagResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUntagResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "UntagResource", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go new file mode 100644 index 00000000000..74d1a329aaf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go @@ -0,0 +1,185 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Associates an existing KMS alias with a different KMS key. Each alias is +// associated with only one KMS key at a time, although a KMS key can have multiple +// aliases. The alias and the KMS key must be in the same Amazon Web Services +// account and Region. Adding, deleting, or updating an alias can allow or deny +// permission to the KMS key. For details, see ABAC for KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key +// Management Service Developer Guide. The current and new KMS key must be the same +// type (both symmetric or both asymmetric or both HMAC), and they must have the +// same key usage. 
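+//
+// For illustration, a minimal sketch of a call (the alias name and key ID are
+// placeholder values):
+//
+//	_, err := client.UpdateAlias(ctx, &UpdateAliasInput{
+//		AliasName:   aws.String("alias/ExampleAlias"),
+//		TargetKeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//	})
+//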
This restriction prevents errors in code that uses aliases. If +// you must assign an alias to a different type of KMS key, use DeleteAlias to +// delete the old alias and CreateAlias to create a new alias. You cannot use +// UpdateAlias to change an alias name. To change an alias name, use DeleteAlias to +// delete the old alias and CreateAlias to create a new alias. Because an alias is +// not a property of a KMS key, you can create, update, and delete the aliases of a +// KMS key without affecting the KMS key. Also, aliases do not appear in the +// response from the DescribeKey operation. To get the aliases of all KMS keys in +// the account, use the ListAliases operation. The KMS key that you use for this +// operation must be in a compatible key state. For details, see Key states of KMS +// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in +// the Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account. +// Required permissions +// +// * kms:UpdateAlias +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// on the alias (IAM policy). +// +// * kms:UpdateAlias +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// on the current KMS key (key policy). +// +// * kms:UpdateAlias +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// on the new KMS key (key policy). +// +// For details, see Controlling access to aliases +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access) +// in the Key Management Service Developer Guide. Related operations: +// +// * +// CreateAlias +// +// * DeleteAlias +// +// * ListAliases +func (c *Client) UpdateAlias(ctx context.Context, params *UpdateAliasInput, optFns ...func(*Options)) (*UpdateAliasOutput, error) { + if params == nil { + params = &UpdateAliasInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateAlias", params, optFns, c.addOperationUpdateAliasMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateAliasOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateAliasInput struct { + + // Identifies the alias that is changing its KMS key. This value must begin with + // alias/ followed by the alias name, such as alias/ExampleAlias. You cannot use + // UpdateAlias to change the alias name. + // + // This member is required. + AliasName *string + + // Identifies the customer managed key + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) + // to associate with the alias. You don't have permission to associate an alias + // with an Amazon Web Services managed key + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk). + // The KMS key must be in the same Amazon Web Services account and Region as the + // alias. Also, the new target KMS key must be the same type as the current target + // KMS key (both symmetric or both asymmetric or both HMAC) and they must have the + // same key usage. Specify the key ID or key ARN of the KMS key. For example: + // + // * + // Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. 
To verify + // that the alias is mapped to the correct KMS key, use ListAliases. + // + // This member is required. + TargetKeyId *string + + noSmithyDocumentSerde +} + +type UpdateAliasOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateAliasMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateAlias{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateAlias{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateAliasValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateAlias(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateAlias(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "UpdateAlias", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go new file mode 100644 index 00000000000..86b9d04c68f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go @@ -0,0 +1,266 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Changes the properties of a custom key store. You can use this operation to +// change the properties of an CloudHSM key store or an external key store. Use the +// required CustomKeyStoreId parameter to identify the custom key store. Use the +// remaining optional parameters to change its properties. 
This operation does not +// return any property values. To verify the updated property values, use the +// DescribeCustomKeyStores operation. This operation is part of the custom key +// stores +// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// feature in KMS, which combines the convenience and extensive integration of KMS +// with the isolation and control of a key store that you own and manage. When +// updating the properties of an external key store, verify that the updated +// settings connect your key store, via the external key store proxy, to the same +// external key manager as the previous settings, or to a backup or snapshot of the +// external key manager with the same cryptographic keys. If the updated connection +// settings fail, you can fix them and retry, although an extended delay might +// disrupt Amazon Web Services services. However, if KMS permanently loses its +// access to cryptographic keys, ciphertext encrypted under those keys is +// unrecoverable. For external key stores: Some external key managers provide a +// simpler method for updating an external key store. For details, see your +// external key manager documentation. When updating an external key store in the +// KMS console, you can upload a JSON-based proxy configuration file with the +// desired values. You cannot upload the proxy configuration file to the +// UpdateCustomKeyStore operation. However, you can use the file to help you +// determine the correct values for the UpdateCustomKeyStore parameters. For an +// CloudHSM key store, you can use this operation to change the custom key store +// friendly name (NewCustomKeyStoreName), to tell KMS about a change to the kmsuser +// crypto user password (KeyStorePassword), or to associate the custom key store +// with a different, but related, CloudHSM cluster (CloudHsmClusterId). To update +// any property of an CloudHSM key store, the ConnectionState of the CloudHSM key +// store must be DISCONNECTED. For an external key store, you can use this +// operation to change the custom key store friendly name (NewCustomKeyStoreName), +// or to tell KMS about a change to the external key store proxy authentication +// credentials (XksProxyAuthenticationCredential), connection method +// (XksProxyConnectivity), external proxy endpoint (XksProxyUriEndpoint) and path +// (XksProxyUriPath). For external key stores with an XksProxyConnectivity of +// VPC_ENDPOINT_SERVICE, you can also update the Amazon VPC endpoint service name +// (XksProxyVpcEndpointServiceName). To update most properties of an external key +// store, the ConnectionState of the external key store must be DISCONNECTED. +// However, you can update the CustomKeyStoreName, +// XksProxyAuthenticationCredential, and XksProxyUriPath of an external key store +// when it is in the CONNECTED or DISCONNECTED state. If your update requires a +// DISCONNECTED state, before using UpdateCustomKeyStore, use the +// DisconnectCustomKeyStore operation to disconnect the custom key store. After the +// UpdateCustomKeyStore operation completes, use the ConnectCustomKeyStore to +// reconnect the custom key store. To find the ConnectionState of the custom key +// store, use the DescribeCustomKeyStores operation. Before updating the custom key +// store, verify that the new values allow KMS to connect the custom key store to +// its backing key store. 
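+//
+// For illustration, a minimal sketch that renames a custom key store (the store
+// ID and name are placeholders; other properties are updated through the same
+// optional input fields):
+//
+//	_, err := client.UpdateCustomKeyStore(ctx, &UpdateCustomKeyStoreInput{
+//		CustomKeyStoreId:      aws.String("cks-1234567890abcdef0"),
+//		NewCustomKeyStoreName: aws.String("ExampleKeyStore"),
+//	})
+//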
For example, before you change the XksProxyUriPath value, +// verify that the external key store proxy is reachable at the new path. If the +// operation succeeds, it returns a JSON object with no properties. Cross-account +// use: No. You cannot perform this operation on a custom key store in a different +// Amazon Web Services account. Required permissions: kms:UpdateCustomKeyStore +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (IAM policy) Related operations: +// +// * ConnectCustomKeyStore +// +// * +// CreateCustomKeyStore +// +// * DeleteCustomKeyStore +// +// * DescribeCustomKeyStores +// +// * +// DisconnectCustomKeyStore +func (c *Client) UpdateCustomKeyStore(ctx context.Context, params *UpdateCustomKeyStoreInput, optFns ...func(*Options)) (*UpdateCustomKeyStoreOutput, error) { + if params == nil { + params = &UpdateCustomKeyStoreInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateCustomKeyStore", params, optFns, c.addOperationUpdateCustomKeyStoreMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateCustomKeyStoreOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateCustomKeyStoreInput struct { + + // Identifies the custom key store that you want to update. Enter the ID of the + // custom key store. To find the ID of a custom key store, use the + // DescribeCustomKeyStores operation. + // + // This member is required. + CustomKeyStoreId *string + + // Associates the custom key store with a related CloudHSM cluster. This parameter + // is valid only for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM. + // Enter the cluster ID of the cluster that you used to create the custom key store + // or a cluster that shares a backup history and has the same cluster certificate + // as the original cluster. You cannot use this parameter to associate a custom key + // store with an unrelated cluster. In addition, the replacement cluster must + // fulfill the requirements + // (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore) + // for a cluster associated with a custom key store. To view the cluster + // certificate of a cluster, use the DescribeClusters + // (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) + // operation. To change this value, the CloudHSM key store must be disconnected. + CloudHsmClusterId *string + + // Enter the current password of the kmsuser crypto user (CU) in the CloudHSM + // cluster that is associated with the custom key store. This parameter is valid + // only for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM. This + // parameter tells KMS the current password of the kmsuser crypto user (CU). It + // does not set or change the password of any users in the CloudHSM cluster. To + // change this value, the CloudHSM key store must be disconnected. + KeyStorePassword *string + + // Changes the friendly name of the custom key store to the value that you specify. + // The custom key store name must be unique in the Amazon Web Services account. To + // change this value, an CloudHSM key store must be disconnected. An external key + // store can be connected or disconnected. + NewCustomKeyStoreName *string + + // Changes the credentials that KMS uses to sign requests to the external key store + // proxy (XKS proxy). This parameter is valid only for custom key stores with a + // CustomKeyStoreType of EXTERNAL_KEY_STORE. 
You must specify both the AccessKeyId
+	// and SecretAccessKey value in the authentication credential, even if you are only
+	// updating one value. This parameter doesn't establish or change your
+	// authentication credentials on the proxy. It just tells KMS the credential that
+	// you established with your external key store proxy. For example, if you rotate
+	// the credential on your external key store proxy, you can use this parameter to
+	// update the credential in KMS. You can change this value when the external key
+	// store is connected or disconnected.
+	XksProxyAuthenticationCredential *types.XksProxyAuthenticationCredentialType
+
+	// Changes the connectivity setting for the external key store. To indicate that
+	// the external key store proxy uses an Amazon VPC endpoint service to communicate
+	// with KMS, specify VPC_ENDPOINT_SERVICE. Otherwise, specify PUBLIC_ENDPOINT. If
+	// you change the XksProxyConnectivity to VPC_ENDPOINT_SERVICE, you must also
+	// change the XksProxyUriEndpoint and add an XksProxyVpcEndpointServiceName value.
+	// If you change the XksProxyConnectivity to PUBLIC_ENDPOINT, you must also change
+	// the XksProxyUriEndpoint and specify a null or empty string for the
+	// XksProxyVpcEndpointServiceName value. To change this value, the external key
+	// store must be disconnected.
+	XksProxyConnectivity types.XksProxyConnectivityType
+
+	// Changes the URI endpoint that KMS uses to connect to your external key store
+	// proxy (XKS proxy). This parameter is valid only for custom key stores with a
+	// CustomKeyStoreType of EXTERNAL_KEY_STORE. For external key stores with an
+	// XksProxyConnectivity value of PUBLIC_ENDPOINT, the protocol must be HTTPS. For
+	// external key stores with an XksProxyConnectivity value of VPC_ENDPOINT_SERVICE,
+	// specify https:// followed by the private DNS name associated with the VPC
+	// endpoint service. Each external key store must use a different private DNS name.
+	// The combined XksProxyUriEndpoint and XksProxyUriPath values must be unique in
+	// the Amazon Web Services account and Region. To change this value, the external
+	// key store must be disconnected.
+	XksProxyUriEndpoint *string
+
+	// Changes the base path to the proxy APIs for this external key store. To find
+	// this value, see the documentation for your external key manager and external key
+	// store proxy (XKS proxy). This parameter is valid only for custom key stores with
+	// a CustomKeyStoreType of EXTERNAL_KEY_STORE. The value must start with / and must
+	// end with /kms/xks/v1, where v1 represents the version of the KMS external key
+	// store proxy API. You can include an optional prefix between the required
+	// elements such as /example/kms/xks/v1. The combined XksProxyUriEndpoint and
+	// XksProxyUriPath values must be unique in the Amazon Web Services account and
+	// Region. You can change this value when the external key store is connected or
+	// disconnected.
+	XksProxyUriPath *string
+
+	// Changes the name that KMS uses to identify the Amazon VPC endpoint service for
+	// your external key store proxy (XKS proxy). This parameter is valid when the
+	// CustomKeyStoreType is EXTERNAL_KEY_STORE and the XksProxyConnectivity is
+	// VPC_ENDPOINT_SERVICE. To change this value, the external key store must be
+	// disconnected.
+	XksProxyVpcEndpointServiceName *string
+
+	noSmithyDocumentSerde
+}
+
+type UpdateCustomKeyStoreOutput struct {
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateCustomKeyStoreMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateCustomKeyStoreValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateCustomKeyStore(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateCustomKeyStore(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "UpdateCustomKeyStore", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go new file mode 100644 index 00000000000..b0700a418d0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go @@ -0,0 +1,143 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Updates the description of a KMS key. To see the description of a KMS key, use +// DescribeKey. The KMS key that you use for this operation must be in a compatible +// key state. For details, see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account. 
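+//
+// For illustration, a minimal sketch of a call (key ID and description are
+// placeholder values):
+//
+//	_, err := client.UpdateKeyDescription(ctx, &UpdateKeyDescriptionInput{
+//		KeyId:       aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//		Description: aws.String("Example key"),
+//	})
+//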
+// Required permissions: kms:UpdateKeyDescription +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations +// +// * CreateKey +// +// * DescribeKey +func (c *Client) UpdateKeyDescription(ctx context.Context, params *UpdateKeyDescriptionInput, optFns ...func(*Options)) (*UpdateKeyDescriptionOutput, error) { + if params == nil { + params = &UpdateKeyDescriptionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateKeyDescription", params, optFns, c.addOperationUpdateKeyDescriptionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateKeyDescriptionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateKeyDescriptionInput struct { + + // New description for the KMS key. + // + // This member is required. + Description *string + + // Updates the description of the specified KMS key. Specify the key ID or key ARN + // of the KMS key. For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type UpdateKeyDescriptionOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateKeyDescriptionMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateKeyDescription{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateKeyDescription{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateKeyDescriptionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateKeyDescription(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateKeyDescription(region string) *awsmiddleware.RegisterServiceMetadata { + return 
&awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "UpdateKeyDescription", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go new file mode 100644 index 00000000000..e7b87d5dfec --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go @@ -0,0 +1,199 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Changes the primary key of a multi-Region key. This operation changes the +// replica key in the specified Region to a primary key and changes the former +// primary key to a replica key. For example, suppose you have a primary key in +// us-east-1 and a replica key in eu-west-2. If you run UpdatePrimaryRegion with a +// PrimaryRegion value of eu-west-2, the primary key is now the key in eu-west-2, +// and the key in us-east-1 becomes a replica key. For details, see Updating the +// primary Region +// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-update) +// in the Key Management Service Developer Guide. This operation supports +// multi-Region keys, an KMS feature that lets you create multiple interoperable +// KMS keys in different Amazon Web Services Regions. Because these KMS keys have +// the same key ID, key material, and other metadata, you can use them +// interchangeably to encrypt data in one Amazon Web Services Region and decrypt it +// in a different Amazon Web Services Region without re-encrypting the data or +// making a cross-Region call. For more information about multi-Region keys, see +// Multi-Region keys in KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) +// in the Key Management Service Developer Guide. The primary key of a multi-Region +// key is the source for properties that are always shared by primary and replica +// keys, including the key material, key ID +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-id), +// key spec +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-spec), +// key usage +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-usage), +// key material origin +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-origin), +// and automatic key rotation +// (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html). It's +// the only key that can be replicated. You cannot delete the primary key +// (https://docs.aws.amazon.com/kms/latest/APIReference/API_ScheduleKeyDeletion.html) +// until all replica keys are deleted. The key ID and primary Region that you +// specify uniquely identify the replica key that will become the primary key. The +// primary Region must already have a replica key. This operation does not create a +// KMS key in the specified Region. To find the replica keys, use the DescribeKey +// operation on the primary key or any replica key. To create a replica key, use +// the ReplicateKey operation. 
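+//
+// For illustration, a minimal sketch of a call (the multi-Region key ID and
+// Region are placeholder values):
+//
+//	_, err := client.UpdatePrimaryRegion(ctx, &UpdatePrimaryRegionInput{
+//		KeyId:         aws.String("mrk-1234abcd12ab34cd56ef1234567890ab"),
+//		PrimaryRegion: aws.String("eu-west-2"),
+//	})
+//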
+// You can run this operation while using the affected multi-Region keys in
+// cryptographic operations. This operation should not delay, interrupt, or cause
+// failures in cryptographic operations. Even after this operation completes, the
+// process of updating the primary Region might still be in progress for a few
+// more seconds. Operations such as DescribeKey might display both the old and new
+// primary keys as replicas. The old and new primary keys have a transient key
+// state of Updating. The original key state is restored when the update is
+// complete. While the key state is Updating, you can use the keys in
+// cryptographic operations, but you cannot replicate the new primary key or
+// perform certain management operations, such as enabling or disabling these
+// keys. For details about the Updating key state, see Key states of KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the
+// Key Management Service Developer Guide. This operation does not return any
+// output. To verify that the primary key is changed, use the DescribeKey
+// operation. Cross-account use: No. You cannot use this operation in a different
+// Amazon Web Services account. Required permissions:
+//
+// * kms:UpdatePrimaryRegion on the current primary key (in the primary key's
+// Region). Include this permission in the primary key's key policy.
+//
+// * kms:UpdatePrimaryRegion on the current replica key (in the replica key's
+// Region). Include this permission in the replica key's key policy.
+//
+// # Related operations
+//
+// * CreateKey
+//
+// * ReplicateKey
+func (c *Client) UpdatePrimaryRegion(ctx context.Context, params *UpdatePrimaryRegionInput, optFns ...func(*Options)) (*UpdatePrimaryRegionOutput, error) {
+	if params == nil {
+		params = &UpdatePrimaryRegionInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "UpdatePrimaryRegion", params, optFns, c.addOperationUpdatePrimaryRegionMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*UpdatePrimaryRegionOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type UpdatePrimaryRegionInput struct {
+
+	// Identifies the current primary key. When the operation completes, this KMS key
+	// will be a replica key. Specify the key ID or key ARN of a multi-Region primary
+	// key. For example:
+	//
+	// * Key ID: mrk-1234abcd12ab34cd56ef1234567890ab
+	//
+	// * Key ARN:
+	// arn:aws:kms:us-east-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab
+	//
+	// To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.
+	//
+	// This member is required.
+	KeyId *string
+
+	// The Amazon Web Services Region of the new primary key. Enter the Region ID, such
+	// as us-east-1 or ap-southeast-2. There must be an existing replica key in this
+	// Region. When the operation completes, the multi-Region key in this Region will
+	// be the primary key.
+	//
+	// This member is required.
+	PrimaryRegion *string
+
+	noSmithyDocumentSerde
+}
+
+type UpdatePrimaryRegionOutput struct {
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdatePrimaryRegionMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdatePrimaryRegion{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdatePrimaryRegion{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdatePrimaryRegionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdatePrimaryRegion(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdatePrimaryRegion(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "UpdatePrimaryRegion", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go new file mode 100644 index 00000000000..f118f5da0c6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go @@ -0,0 +1,222 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Verifies a digital signature that was generated by the Sign operation. +// Verification confirms that an authorized user signed the message with the +// specified KMS key and signing algorithm, and the message hasn't changed since it +// was signed. If the signature is verified, the value of the SignatureValid field +// in the response is True. If the signature verification fails, the Verify +// operation fails with an KMSInvalidSignatureException exception. A digital +// signature is generated by using the private key in an asymmetric KMS key. 
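+//
+// For illustration, a minimal sketch of a call (identifiers and byte slices are
+// placeholders; the algorithm constant assumes an ECDSA P-256 key):
+//
+//	out, err := client.Verify(ctx, &VerifyInput{
+//		KeyId:            aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//		Message:          message,   // the original message bytes
+//		Signature:        signature, // as returned by Sign
+//		SigningAlgorithm: types.SigningAlgorithmSpecEcdsaSha256,
+//	})
+//	// on success, out.SignatureValid is true; an invalid signature returns an error
+//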
The +// signature is verified by using the public key in the same asymmetric KMS key. +// For information about asymmetric KMS keys, see Asymmetric KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) +// in the Key Management Service Developer Guide. To verify a digital signature, +// you can use the Verify operation. Specify the same asymmetric KMS key, message, +// and signing algorithm that were used to produce the signature. You can also +// verify the digital signature by using the public key of the KMS key outside of +// KMS. Use the GetPublicKey operation to download the public key in the asymmetric +// KMS key and then use the public key to verify the signature outside of KMS. The +// advantage of using the Verify operation is that it is performed within KMS. As a +// result, it's easy to call, the operation is performed within the FIPS boundary, +// it is logged in CloudTrail, and you can use key policy and IAM policy to +// determine who is authorized to use the KMS key to verify signatures. To verify a +// signature outside of KMS with an SM2 public key (China Regions only), you must +// specify the distinguishing ID. By default, KMS uses 1234567812345678 as the +// distinguishing ID. For more information, see Offline verification with SM2 key +// pairs +// (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification). +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: Yes. To perform this +// operation with a KMS key in a different Amazon Web Services account, specify the +// key ARN or alias ARN in the value of the KeyId parameter. Required permissions: +// kms:Verify +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: Sign +func (c *Client) Verify(ctx context.Context, params *VerifyInput, optFns ...func(*Options)) (*VerifyOutput, error) { + if params == nil { + params = &VerifyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "Verify", params, optFns, c.addOperationVerifyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*VerifyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type VerifyInput struct { + + // Identifies the asymmetric KMS key that will be used to verify the signature. + // This must be the same KMS key that was used to generate the signature. If you + // specify a different KMS key, the signature verification fails. To specify a KMS + // key, use its key ID, key ARN, alias name, or alias ARN. When using an alias + // name, prefix it with "alias/". To specify a KMS key in a different Amazon Web + // Services account, you must use the key ARN or alias ARN. For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Alias name: alias/ExampleAlias + // + // * Alias ARN: + // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key + // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias + // ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // Specifies the message that was signed. 
You can submit a raw message of up to + // 4096 bytes, or a hash digest of the message. If you submit a digest, use the + // MessageType parameter with a value of DIGEST. If the message specified here is + // different from the message that was signed, the signature verification fails. A + // message and its hash digest are considered to be the same message. + // + // This member is required. + Message []byte + + // The signature that the Sign operation generated. + // + // This member is required. + Signature []byte + + // The signing algorithm that was used to sign the message. If you submit a + // different algorithm, the signature verification fails. + // + // This member is required. + SigningAlgorithm types.SigningAlgorithmSpec + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. + // For more information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantTokens []string + + // Tells KMS whether the value of the Message parameter is a message or message + // digest. The default value, RAW, indicates a message. To indicate a message + // digest, enter DIGEST. Use the DIGEST value only when the value of the Message + // parameter is a message digest. If you use the DIGEST value with a raw message, + // the security of the verification operation can be compromised. + MessageType types.MessageType + + noSmithyDocumentSerde +} + +type VerifyOutput struct { + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the asymmetric KMS key that was used to verify the signature. + KeyId *string + + // A Boolean value that indicates whether the signature was verified. A value of + // True indicates that the Signature was produced by signing the Message with the + // specified KeyID and SigningAlgorithm. If the signature is not verified, the + // Verify operation fails with a KMSInvalidSignatureException exception. + SignatureValid bool + + // The signing algorithm that was used to verify the signature. + SigningAlgorithm types.SigningAlgorithmSpec + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationVerifyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpVerify{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpVerify{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpVerifyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opVerify(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opVerify(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "Verify", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go new file mode 100644 index 00000000000..bbd63d79a9d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go @@ -0,0 +1,183 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Verifies the hash-based message authentication code (HMAC) for a specified +// message, HMAC KMS key, and MAC algorithm. To verify the HMAC, VerifyMac computes +// an HMAC using the message, HMAC KMS key, and MAC algorithm that you specify, and +// compares the computed HMAC to the HMAC that you specify. If the HMACs are +// identical, the verification succeeds; otherwise, it fails. Verification +// indicates that the message hasn't changed since the HMAC was calculated, and the +// specified key was used to generate and verify the HMAC. 
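+//
+// For illustration, a minimal sketch of a call (identifiers and byte slices are
+// placeholders; the algorithm constant assumes an HMAC_SHA_256 key):
+//
+//	out, err := client.VerifyMac(ctx, &VerifyMacInput{
+//		KeyId:        aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//		Message:      message, // the same message given to GenerateMac
+//		Mac:          mac,     // the HMAC returned by GenerateMac
+//		MacAlgorithm: types.MacAlgorithmSpecHmacSha256,
+//	})
+//	// on success, out.MacValid is true; an invalid HMAC returns an error
+//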
HMAC KMS keys and the +// HMAC algorithms that KMS uses conform to industry standards defined in RFC 2104 +// (https://datatracker.ietf.org/doc/html/rfc2104). This operation is part of KMS +// support for HMAC KMS keys. For details, see HMAC keys in KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) in the Key +// Management Service Developer Guide. The KMS key that you use for this operation +// must be in a compatible key state. For details, see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: Yes. To perform this +// operation with a KMS key in a different Amazon Web Services account, specify the +// key ARN or alias ARN in the value of the KeyId parameter. Required permissions: +// kms:VerifyMac +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: GenerateMac +func (c *Client) VerifyMac(ctx context.Context, params *VerifyMacInput, optFns ...func(*Options)) (*VerifyMacOutput, error) { + if params == nil { + params = &VerifyMacInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "VerifyMac", params, optFns, c.addOperationVerifyMacMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*VerifyMacOutput) + out.ResultMetadata = metadata + return out, nil +} + +type VerifyMacInput struct { + + // The KMS key that will be used in the verification. Enter a key ID of the KMS key + // that was used to generate the HMAC. If you identify a different KMS key, the + // VerifyMac operation fails. + // + // This member is required. + KeyId *string + + // The HMAC to verify. Enter the HMAC that was generated by the GenerateMac + // operation when you specified the same message, HMAC KMS key, and MAC algorithm + // as the values specified in this request. + // + // This member is required. + Mac []byte + + // The MAC algorithm that will be used in the verification. Enter the same MAC + // algorithm that was used to compute the HMAC. This algorithm must be supported by + // the HMAC KMS key identified by the KeyId parameter. + // + // This member is required. + MacAlgorithm types.MacAlgorithmSpec + + // The message that will be used in the verification. Enter the same message that + // was used to generate the HMAC. GenerateMac and VerifyMac do not provide special + // handling for message digests. If you generated an HMAC for a hash digest of a + // message, you must verify the HMAC for the same hash digest. + // + // This member is required. + Message []byte + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. + // For more information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantTokens []string + + noSmithyDocumentSerde +} + +type VerifyMacOutput struct { + + // The HMAC KMS key used in the verification. + KeyId *string + + // The MAC algorithm used in the verification. + MacAlgorithm types.MacAlgorithmSpec + + // A Boolean value that indicates whether the HMAC was verified. 
A value of True
+	// indicates that the HMAC (Mac) was generated with the specified Message, HMAC KMS
+	// key (KeyId), and MacAlgorithm. If the HMAC is not verified, the VerifyMac
+	// operation fails with a KMSInvalidMacException. This exception
+	// indicates that one or more of the inputs changed since the HMAC was computed.
+	MacValid bool
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationVerifyMacMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	err = stack.Serialize.Add(&awsAwsjson11_serializeOpVerifyMac{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpVerifyMac{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+		return err
+	}
+	if err = addRetryMiddlewares(stack, options); err != nil {
+		return err
+	}
+	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addOpVerifyMacValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opVerifyMac(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opVerifyMac(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "kms",
+		OperationName: "VerifyMac",
+	}
+}
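For illustration, a minimal sketch of how a caller might drive the generated GenerateMac/VerifyMac pair end to end; this is not part of the vendored code, and the key alias, message, and default-config setup are assumptions. On a successful return MacValid is true, since a mismatch surfaces as a KMSInvalidMacException instead.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

func main() {
	ctx := context.Background()

	// Load region and credentials from the default chain.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	msg := []byte("important message")

	// Compute an HMAC with a (hypothetical) HMAC KMS key...
	gen, err := client.GenerateMac(ctx, &kms.GenerateMacInput{
		KeyId:        aws.String("alias/example-hmac-key"), // hypothetical alias
		Message:      msg,
		MacAlgorithm: types.MacAlgorithmSpecHmacSha256,
	})
	if err != nil {
		log.Fatal(err)
	}

	// ...then verify it with the same key, message, and algorithm.
	out, err := client.VerifyMac(ctx, &kms.VerifyMacInput{
		KeyId:        gen.KeyId,
		Message:      msg,
		Mac:          gen.Mac,
		MacAlgorithm: types.MacAlgorithmSpecHmacSha256,
	})
	if err != nil {
		// A mismatched HMAC arrives here as a KMSInvalidMacException.
		log.Fatal(err)
	}
	log.Printf("MacValid: %v", out.MacValid)
}

diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go
new file mode 100644
index 00000000000..17e5b82a40a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go
@@ -0,0 +1,12815 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.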
+ +package kms + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + smithy "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "io/ioutil" + "strings" +) + +type awsAwsjson11_deserializeOpCancelKeyDeletion struct { +} + +func (*awsAwsjson11_deserializeOpCancelKeyDeletion) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCancelKeyDeletion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCancelKeyDeletion(response, &metadata) + } + output := &CancelKeyDeletionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCancelKeyDeletionOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCancelKeyDeletion(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + 
case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpConnectCustomKeyStore struct { +} + +func (*awsAwsjson11_deserializeOpConnectCustomKeyStore) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpConnectCustomKeyStore) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorConnectCustomKeyStore(response, &metadata) + } + output := &ConnectCustomKeyStoreOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentConnectCustomKeyStoreOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorConnectCustomKeyStore(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, 
ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("CloudHsmClusterInvalidConfigurationException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterInvalidConfigurationException(response, errorBody) + + case strings.EqualFold("CloudHsmClusterNotActiveException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterNotActiveException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreInvalidStateException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateAlias struct { +} + +func (*awsAwsjson11_deserializeOpCreateAlias) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateAlias) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateAlias(response, &metadata) + } + output := &CreateAliasOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateAlias(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = 
restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AlreadyExistsException", errorCode): + return awsAwsjson11_deserializeErrorAlreadyExistsException(response, errorBody) + + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidAliasNameException", errorCode): + return awsAwsjson11_deserializeErrorInvalidAliasNameException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateCustomKeyStore struct { +} + +func (*awsAwsjson11_deserializeOpCreateCustomKeyStore) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateCustomKeyStore) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateCustomKeyStore(response, &metadata) + } + output := &CreateCustomKeyStoreOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateCustomKeyStoreOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateCustomKeyStore(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := 
response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("CloudHsmClusterInUseException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterInUseException(response, errorBody) + + case strings.EqualFold("CloudHsmClusterInvalidConfigurationException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterInvalidConfigurationException(response, errorBody) + + case strings.EqualFold("CloudHsmClusterNotActiveException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterNotActiveException(response, errorBody) + + case strings.EqualFold("CloudHsmClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterNotFoundException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNameInUseException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNameInUseException(response, errorBody) + + case strings.EqualFold("IncorrectTrustAnchorException", errorCode): + return awsAwsjson11_deserializeErrorIncorrectTrustAnchorException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("XksProxyIncorrectAuthenticationCredentialException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyIncorrectAuthenticationCredentialException(response, errorBody) + + case strings.EqualFold("XksProxyInvalidConfigurationException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyInvalidConfigurationException(response, errorBody) + + case strings.EqualFold("XksProxyInvalidResponseException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyInvalidResponseException(response, errorBody) + + case strings.EqualFold("XksProxyUriEndpointInUseException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyUriEndpointInUseException(response, errorBody) + + case strings.EqualFold("XksProxyUriInUseException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyUriInUseException(response, errorBody) + + case strings.EqualFold("XksProxyUriUnreachableException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyUriUnreachableException(response, errorBody) + + case strings.EqualFold("XksProxyVpcEndpointServiceInUseException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyVpcEndpointServiceInUseException(response, errorBody) + + case strings.EqualFold("XksProxyVpcEndpointServiceInvalidConfigurationException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyVpcEndpointServiceInvalidConfigurationException(response, errorBody) + + case 
strings.EqualFold("XksProxyVpcEndpointServiceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyVpcEndpointServiceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateGrant struct { +} + +func (*awsAwsjson11_deserializeOpCreateGrant) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateGrant) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateGrant(response, &metadata) + } + output := &CreateGrantOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateGrantOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateGrant(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, 
errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateKey struct { +} + +func (*awsAwsjson11_deserializeOpCreateKey) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateKey) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateKey(response, &metadata) + } + output := &CreateKeyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateKeyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateKey(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := 
restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("CloudHsmClusterInvalidConfigurationException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterInvalidConfigurationException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreInvalidStateException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response, errorBody) + + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocumentException", errorCode): + return awsAwsjson11_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("TagException", errorCode): + return awsAwsjson11_deserializeErrorTagException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + case strings.EqualFold("XksKeyAlreadyInUseException", errorCode): + return awsAwsjson11_deserializeErrorXksKeyAlreadyInUseException(response, errorBody) + + case strings.EqualFold("XksKeyInvalidConfigurationException", errorCode): + return awsAwsjson11_deserializeErrorXksKeyInvalidConfigurationException(response, errorBody) + + case strings.EqualFold("XksKeyNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorXksKeyNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDecrypt struct { +} + +func (*awsAwsjson11_deserializeOpDecrypt) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDecrypt) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDecrypt(response, &metadata) + } + output := &DecryptOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := 
smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDecryptOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDecrypt(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("IncorrectKeyException", errorCode): + return awsAwsjson11_deserializeErrorIncorrectKeyException(response, errorBody) + + case strings.EqualFold("InvalidCiphertextException", errorCode): + return awsAwsjson11_deserializeErrorInvalidCiphertextException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: 
errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteAlias struct { +} + +func (*awsAwsjson11_deserializeOpDeleteAlias) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteAlias) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteAlias(response, &metadata) + } + output := &DeleteAliasOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteAlias(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteCustomKeyStore struct { +} + +func (*awsAwsjson11_deserializeOpDeleteCustomKeyStore) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteCustomKeyStore) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, 
metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteCustomKeyStore(response, &metadata) + } + output := &DeleteCustomKeyStoreOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteCustomKeyStoreOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteCustomKeyStore(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("CustomKeyStoreHasCMKsException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreHasCMKsException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreInvalidStateException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteImportedKeyMaterial struct { +} + +func (*awsAwsjson11_deserializeOpDeleteImportedKeyMaterial) ID() string { + return "OperationDeserializer" +} + 
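+// Every generated deserializer in this file repeats the same defensive
+// pattern: the HTTP response body is tee'd into a small ring buffer while it
+// is decoded, so that when decoding fails the most recent bytes of the
+// payload can be attached to the DeserializationError as a snapshot; each
+// companion error deserializer then dispatches on the sanitized error code
+// with strings.EqualFold. A minimal sketch of the snapshot idea (illustrative
+// only, not generated code; the buffer size and names are assumptions):
+//
+//	var buff [1024]byte
+//	ring := smithyio.NewRingBuffer(buff[:])
+//	dec := json.NewDecoder(io.TeeReader(response.Body, ring))
+//	dec.UseNumber()
+//	var shape interface{}
+//	if err := dec.Decode(&shape); err != nil && err != io.EOF {
+//		var snapshot bytes.Buffer
+//		io.Copy(&snapshot, ring) // recover the tail of the raw payload
+//		return &smithy.DeserializationError{Err: err, Snapshot: snapshot.Bytes()}
+//	}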
+func (m *awsAwsjson11_deserializeOpDeleteImportedKeyMaterial) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteImportedKeyMaterial(response, &metadata) + } + output := &DeleteImportedKeyMaterialOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteImportedKeyMaterial(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeCustomKeyStores struct { +} + +func (*awsAwsjson11_deserializeOpDescribeCustomKeyStores) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeCustomKeyStores) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeCustomKeyStores(response, &metadata) + } + output := &DescribeCustomKeyStoresOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeCustomKeyStoresOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeCustomKeyStores(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("CustomKeyStoreNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidMarkerException", errorCode): + return awsAwsjson11_deserializeErrorInvalidMarkerException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeKey struct { +} + +func (*awsAwsjson11_deserializeOpDescribeKey) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeKey) 
HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeKey(response, &metadata) + } + output := &DescribeKeyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeKeyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeKey(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDisableKey struct { +} + +func 
(*awsAwsjson11_deserializeOpDisableKey) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDisableKey) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDisableKey(response, &metadata) + } + output := &DisableKeyOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDisableKey(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDisableKeyRotation struct { +} + +func (*awsAwsjson11_deserializeOpDisableKeyRotation) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDisableKeyRotation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, 
metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDisableKeyRotation(response, &metadata) + } + output := &DisableKeyRotationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDisableKeyRotation(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDisconnectCustomKeyStore struct { +} + +func (*awsAwsjson11_deserializeOpDisconnectCustomKeyStore) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDisconnectCustomKeyStore) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, 
metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDisconnectCustomKeyStore(response, &metadata) + } + output := &DisconnectCustomKeyStoreOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDisconnectCustomKeyStoreOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDisconnectCustomKeyStore(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("CustomKeyStoreInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreInvalidStateException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpEnableKey struct { +} + +func (*awsAwsjson11_deserializeOpEnableKey) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpEnableKey) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out 
middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorEnableKey(response, &metadata) + } + output := &EnableKeyOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorEnableKey(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpEnableKeyRotation struct { +} + +func (*awsAwsjson11_deserializeOpEnableKeyRotation) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpEnableKeyRotation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := 
out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorEnableKeyRotation(response, &metadata) + } + output := &EnableKeyRotationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorEnableKeyRotation(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpEncrypt struct { +} + +func (*awsAwsjson11_deserializeOpEncrypt) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpEncrypt) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return 
out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorEncrypt(response, &metadata) + } + output := &EncryptOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentEncryptOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorEncrypt(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + 
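// No modeled KMS exception matched the sanitized error code, so the
// deserializer falls back to a generic smithy.GenericAPIError carrying
// whatever code and message were recovered from the X-Amzn-ErrorType
// header or the JSON error body.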
genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGenerateDataKey struct { +} + +func (*awsAwsjson11_deserializeOpGenerateDataKey) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGenerateDataKey) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGenerateDataKey(response, &metadata) + } + output := &GenerateDataKeyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGenerateDataKeyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGenerateDataKey(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + 
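// Error codes are matched case-insensitively via strings.EqualFold, and each
// modeled exception has a dedicated deserializer that decodes the buffered
// JSON body into a typed error value. A caller can then branch on the
// concrete type — a minimal sketch, assuming this SDK's kms/types package:
//
//	var nfe *types.NotFoundException
//	if errors.As(err, &nfe) { /* handle a missing or unknown key */ }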
+ case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGenerateDataKeyPair struct { +} + +func (*awsAwsjson11_deserializeOpGenerateDataKeyPair) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGenerateDataKeyPair) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGenerateDataKeyPair(response, &metadata) + } + output := &GenerateDataKeyPairOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGenerateDataKeyPairOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGenerateDataKeyPair(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + 
err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGenerateDataKeyPairWithoutPlaintext struct { +} + +func (*awsAwsjson11_deserializeOpGenerateDataKeyPairWithoutPlaintext) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGenerateDataKeyPairWithoutPlaintext) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGenerateDataKeyPairWithoutPlaintext(response, &metadata) + } + output := &GenerateDataKeyPairWithoutPlaintextOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGenerateDataKeyPairWithoutPlaintextOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: 
fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGenerateDataKeyPairWithoutPlaintext(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGenerateDataKeyWithoutPlaintext struct { +} + +func (*awsAwsjson11_deserializeOpGenerateDataKeyWithoutPlaintext) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGenerateDataKeyWithoutPlaintext) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: 
fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGenerateDataKeyWithoutPlaintext(response, &metadata) + } + output := &GenerateDataKeyWithoutPlaintextOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGenerateDataKeyWithoutPlaintextOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGenerateDataKeyWithoutPlaintext(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return 
awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGenerateMac struct { +} + +func (*awsAwsjson11_deserializeOpGenerateMac) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGenerateMac) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGenerateMac(response, &metadata) + } + output := &GenerateMacOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGenerateMacOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGenerateMac(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return 
awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGenerateRandom struct { +} + +func (*awsAwsjson11_deserializeOpGenerateRandom) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGenerateRandom) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGenerateRandom(response, &metadata) + } + output := &GenerateRandomOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGenerateRandomOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGenerateRandom(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: 
snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("CustomKeyStoreInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreInvalidStateException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response, errorBody) + + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetKeyPolicy struct { +} + +func (*awsAwsjson11_deserializeOpGetKeyPolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetKeyPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetKeyPolicy(response, &metadata) + } + output := &GetKeyPolicyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetKeyPolicyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetKeyPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body 
:= io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetKeyRotationStatus struct { +} + +func (*awsAwsjson11_deserializeOpGetKeyRotationStatus) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetKeyRotationStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetKeyRotationStatus(response, &metadata) + } + output := &GetKeyRotationStatusOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetKeyRotationStatusOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetKeyRotationStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", 
err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetParametersForImport struct { +} + +func (*awsAwsjson11_deserializeOpGetParametersForImport) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetParametersForImport) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetParametersForImport(response, &metadata) + } + output := &GetParametersForImportOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetParametersForImportOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetParametersForImport(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetPublicKey struct { +} + +func (*awsAwsjson11_deserializeOpGetPublicKey) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetPublicKey) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetPublicKey(response, &metadata) + } + output := &GetPublicKeyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := 
decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson11_deserializeOpDocumentGetPublicKeyOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorGetPublicKey(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	code, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("DependencyTimeoutException", errorCode):
+		return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
+
+	case strings.EqualFold("DisabledException", errorCode):
+		return awsAwsjson11_deserializeErrorDisabledException(response, errorBody)
+
+	case strings.EqualFold("InvalidArnException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody)
+
+	case strings.EqualFold("InvalidGrantTokenException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody)
+
+	case strings.EqualFold("InvalidKeyUsageException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody)
+
+	case strings.EqualFold("KMSInternalException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody)
+
+	case strings.EqualFold("KMSInvalidStateException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody)
+
+	case strings.EqualFold("KeyUnavailableException", errorCode):
+		return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody)
+
+	case strings.EqualFold("NotFoundException", errorCode):
+		return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody)
+
+	case strings.EqualFold("UnsupportedOperationException", errorCode):
+		return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpImportKeyMaterial struct {
+}
+
+func (*awsAwsjson11_deserializeOpImportKeyMaterial) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpImportKeyMaterial) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorImportKeyMaterial(response, &metadata)
+	}
+	output := &ImportKeyMaterialOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson11_deserializeOpDocumentImportKeyMaterialOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorImportKeyMaterial(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	code, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("DependencyTimeoutException", errorCode):
+		return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
+
+	case strings.EqualFold("ExpiredImportTokenException", errorCode):
+		return awsAwsjson11_deserializeErrorExpiredImportTokenException(response, errorBody)
+
+	case strings.EqualFold("IncorrectKeyMaterialException", errorCode):
+		return awsAwsjson11_deserializeErrorIncorrectKeyMaterialException(response, errorBody)
+
+	case strings.EqualFold("InvalidArnException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody)
+
+	case strings.EqualFold("InvalidCiphertextException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidCiphertextException(response, errorBody)
+
+	case strings.EqualFold("InvalidImportTokenException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidImportTokenException(response, errorBody)
+
+	case strings.EqualFold("KMSInternalException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody)
+
+	case strings.EqualFold("KMSInvalidStateException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody)
+
+	case strings.EqualFold("NotFoundException", errorCode):
+		return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody)
+
+	case strings.EqualFold("UnsupportedOperationException", errorCode):
+		return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpListAliases struct {
+}
+
+func (*awsAwsjson11_deserializeOpListAliases) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpListAliases) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorListAliases(response, &metadata)
+	}
+	output := &ListAliasesOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson11_deserializeOpDocumentListAliasesOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorListAliases(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	code, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("DependencyTimeoutException", errorCode):
+		return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
+
+	case strings.EqualFold("InvalidArnException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody)
+
+	case strings.EqualFold("InvalidMarkerException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidMarkerException(response, errorBody)
+
+	case strings.EqualFold("KMSInternalException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody)
+
+	case strings.EqualFold("NotFoundException", errorCode):
+		return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpListGrants struct {
+}
+
+func (*awsAwsjson11_deserializeOpListGrants) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpListGrants) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorListGrants(response, &metadata)
+	}
+	output := &ListGrantsOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson11_deserializeOpDocumentListGrantsOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorListGrants(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	code, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("DependencyTimeoutException", errorCode):
+		return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
+
+	case strings.EqualFold("InvalidArnException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody)
+
+	case strings.EqualFold("InvalidGrantIdException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidGrantIdException(response, errorBody)
+
+	case strings.EqualFold("InvalidMarkerException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidMarkerException(response, errorBody)
+
+	case strings.EqualFold("KMSInternalException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody)
+
+	case strings.EqualFold("KMSInvalidStateException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody)
+
+	case strings.EqualFold("NotFoundException", errorCode):
+		return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpListKeyPolicies struct {
+}
+
+func (*awsAwsjson11_deserializeOpListKeyPolicies) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpListKeyPolicies) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorListKeyPolicies(response, &metadata)
+	}
+	output := &ListKeyPoliciesOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson11_deserializeOpDocumentListKeyPoliciesOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorListKeyPolicies(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	code, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("DependencyTimeoutException", errorCode):
+		return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
+
+	case strings.EqualFold("InvalidArnException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody)
+
+	case strings.EqualFold("KMSInternalException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody)
+
+	case strings.EqualFold("KMSInvalidStateException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody)
+
+	case strings.EqualFold("NotFoundException", errorCode):
+		return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpListKeys struct {
+}
+
+func (*awsAwsjson11_deserializeOpListKeys) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpListKeys) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorListKeys(response, &metadata)
+	}
+	output := &ListKeysOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson11_deserializeOpDocumentListKeysOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorListKeys(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	code, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("DependencyTimeoutException", errorCode):
+		return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
+
+	case strings.EqualFold("InvalidMarkerException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidMarkerException(response, errorBody)
+
+	case strings.EqualFold("KMSInternalException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpListResourceTags struct {
+}
+
+func (*awsAwsjson11_deserializeOpListResourceTags) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpListResourceTags) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorListResourceTags(response, &metadata)
+	}
+	output := &ListResourceTagsOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson11_deserializeOpDocumentListResourceTagsOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorListResourceTags(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	code, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("InvalidArnException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody)
+
+	case strings.EqualFold("InvalidMarkerException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidMarkerException(response, errorBody)
+
+	case strings.EqualFold("KMSInternalException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody)
+
+	case strings.EqualFold("NotFoundException", errorCode):
+		return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpListRetirableGrants struct {
+}
+
+func (*awsAwsjson11_deserializeOpListRetirableGrants) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpListRetirableGrants) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorListRetirableGrants(response, &metadata)
+	}
+	output := &ListRetirableGrantsOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson11_deserializeOpDocumentListRetirableGrantsOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorListRetirableGrants(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	code, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("DependencyTimeoutException", errorCode):
+		return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
+
+	case strings.EqualFold("InvalidArnException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody)
+
+	case strings.EqualFold("InvalidMarkerException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidMarkerException(response, errorBody)
+
+	case strings.EqualFold("KMSInternalException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody)
+
+	case strings.EqualFold("NotFoundException", errorCode):
+		return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpPutKeyPolicy struct {
+}
+
+func (*awsAwsjson11_deserializeOpPutKeyPolicy) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpPutKeyPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorPutKeyPolicy(response, &metadata)
+	}
+	output := &PutKeyPolicyOutput{}
+	out.Result = output
+
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorPutKeyPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	code, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("DependencyTimeoutException", errorCode):
+		return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
+
+	case strings.EqualFold("InvalidArnException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody)
+
+	case strings.EqualFold("KMSInternalException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody)
+
+	case strings.EqualFold("KMSInvalidStateException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody)
+
+	case strings.EqualFold("LimitExceededException", errorCode):
+		return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody)
+
+	case strings.EqualFold("MalformedPolicyDocumentException", errorCode):
+		return awsAwsjson11_deserializeErrorMalformedPolicyDocumentException(response, errorBody)
+
+	case strings.EqualFold("NotFoundException", errorCode):
+		return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody)
+
+	case strings.EqualFold("UnsupportedOperationException", errorCode):
+		return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpReEncrypt struct {
+}
+
+func (*awsAwsjson11_deserializeOpReEncrypt) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpReEncrypt) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorReEncrypt(response, &metadata)
+	}
+	output := &ReEncryptOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson11_deserializeOpDocumentReEncryptOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorReEncrypt(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	code, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("DependencyTimeoutException", errorCode):
+		return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
+
+	case strings.EqualFold("DisabledException", errorCode):
+		return awsAwsjson11_deserializeErrorDisabledException(response, errorBody)
+
+	case strings.EqualFold("IncorrectKeyException", errorCode):
+		return awsAwsjson11_deserializeErrorIncorrectKeyException(response, errorBody)
+
+	case strings.EqualFold("InvalidCiphertextException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidCiphertextException(response, errorBody)
+
+	case strings.EqualFold("InvalidGrantTokenException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody)
+
+	case strings.EqualFold("InvalidKeyUsageException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody)
+
+	case strings.EqualFold("KMSInternalException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody)
+
+	case strings.EqualFold("KMSInvalidStateException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody)
+
+	case strings.EqualFold("KeyUnavailableException", errorCode):
+		return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody)
+
+	case strings.EqualFold("NotFoundException", errorCode):
+		return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpReplicateKey struct {
+}
+
+func (*awsAwsjson11_deserializeOpReplicateKey) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpReplicateKey) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorReplicateKey(response, &metadata)
+	}
+	output := &ReplicateKeyOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson11_deserializeOpDocumentReplicateKeyOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorReplicateKey(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	code, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("AlreadyExistsException", errorCode):
+		return awsAwsjson11_deserializeErrorAlreadyExistsException(response, errorBody)
+
+	case strings.EqualFold("DisabledException", errorCode):
+		return awsAwsjson11_deserializeErrorDisabledException(response, errorBody)
+
+	case strings.EqualFold("InvalidArnException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody)
+
+	case strings.EqualFold("KMSInternalException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody)
+
+	case strings.EqualFold("KMSInvalidStateException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody)
+
+	case strings.EqualFold("LimitExceededException", errorCode):
+		return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody)
+
+	case strings.EqualFold("MalformedPolicyDocumentException", errorCode):
+		return awsAwsjson11_deserializeErrorMalformedPolicyDocumentException(response, errorBody)
+
+	case strings.EqualFold("NotFoundException", errorCode):
+		return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody)
+
+	case strings.EqualFold("TagException", errorCode):
+		return awsAwsjson11_deserializeErrorTagException(response, errorBody)
+
+	case strings.EqualFold("UnsupportedOperationException", errorCode):
+		return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpRetireGrant struct {
+}
+
+func (*awsAwsjson11_deserializeOpRetireGrant) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpRetireGrant) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorRetireGrant(response, &metadata)
+	}
+	output := &RetireGrantOutput{}
+	out.Result = output
+
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorRetireGrant(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	code, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("DependencyTimeoutException", errorCode):
+		return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
+
+	case strings.EqualFold("InvalidArnException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody)
+
+	case strings.EqualFold("InvalidGrantIdException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidGrantIdException(response, errorBody)
+
+	case strings.EqualFold("InvalidGrantTokenException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody)
+
+	case strings.EqualFold("KMSInternalException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody)
+
+	case strings.EqualFold("KMSInvalidStateException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody)
+
+	case strings.EqualFold("NotFoundException", errorCode):
+		return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpRevokeGrant struct {
+}
+
+func (*awsAwsjson11_deserializeOpRevokeGrant) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpRevokeGrant) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorRevokeGrant(response, &metadata)
+	}
+	output := &RevokeGrantOutput{}
+	out.Result = output
+
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorRevokeGrant(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	code, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("DependencyTimeoutException", errorCode):
+		return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
+
+	case strings.EqualFold("InvalidArnException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody)
+
+	case strings.EqualFold("InvalidGrantIdException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidGrantIdException(response, errorBody)
+
+	case strings.EqualFold("KMSInternalException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody)
+
+	case strings.EqualFold("KMSInvalidStateException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody)
+
+	case strings.EqualFold("NotFoundException", errorCode):
+		return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpScheduleKeyDeletion struct {
+}
+
+func (*awsAwsjson11_deserializeOpScheduleKeyDeletion) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpScheduleKeyDeletion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorScheduleKeyDeletion(response, &metadata)
+	}
+	output := &ScheduleKeyDeletionOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson11_deserializeOpDocumentScheduleKeyDeletionOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorScheduleKeyDeletion(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	code, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("DependencyTimeoutException", errorCode):
+		return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
+
+	case strings.EqualFold("InvalidArnException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody)
+
+	case strings.EqualFold("KMSInternalException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody)
+
+	case strings.EqualFold("KMSInvalidStateException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody)
+
+	case strings.EqualFold("NotFoundException", errorCode):
+		return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpSign struct {
+}
+
+func (*awsAwsjson11_deserializeOpSign) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpSign) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorSign(response, &metadata)
+	}
+	output := &SignOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson11_deserializeOpDocumentSignOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorSign(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	code, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("DependencyTimeoutException", errorCode):
+		return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
+
+	case strings.EqualFold("DisabledException", errorCode):
+		return awsAwsjson11_deserializeErrorDisabledException(response, errorBody)
+
+	case strings.EqualFold("InvalidGrantTokenException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody)
+
+	case strings.EqualFold("InvalidKeyUsageException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody)
+
+	case strings.EqualFold("KMSInternalException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody)
+
+	case strings.EqualFold("KMSInvalidStateException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody)
+
+	case strings.EqualFold("KeyUnavailableException", errorCode):
+		return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody)
+
+	case strings.EqualFold("NotFoundException", errorCode):
+		return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpTagResource struct {
+}
+
+func (*awsAwsjson11_deserializeOpTagResource) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorTagResource(response, &metadata)
+	}
+	output := &TagResourceOutput{}
+	out.Result = output
+
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	code, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("InvalidArnException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody)
+
+	case strings.EqualFold("KMSInternalException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody)
+
+	case strings.EqualFold("KMSInvalidStateException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody)
+
+	case strings.EqualFold("LimitExceededException", errorCode):
+		return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody)
+
+	case strings.EqualFold("NotFoundException", errorCode):
+		return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody)
+
+	case strings.EqualFold("TagException", errorCode):
+		return awsAwsjson11_deserializeErrorTagException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpUntagResource struct {
+}
+
+func (*awsAwsjson11_deserializeOpUntagResource) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorUntagResource(response, &metadata)
+	}
+	output := &UntagResourceOutput{}
+	out.Result = output
+
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	code, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("InvalidArnException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody)
+
+	case strings.EqualFold("KMSInternalException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody)
+
+	case strings.EqualFold("KMSInvalidStateException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody)
+
+	case strings.EqualFold("NotFoundException", errorCode):
+		return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody)
+
+	case strings.EqualFold("TagException", errorCode):
+		return awsAwsjson11_deserializeErrorTagException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpUpdateAlias struct {
+}
+
+func (*awsAwsjson11_deserializeOpUpdateAlias) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpUpdateAlias) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorUpdateAlias(response, &metadata)
+	}
+	output := &UpdateAliasOutput{}
+	out.Result = output
+
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorUpdateAlias(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	code, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("DependencyTimeoutException", errorCode):
+		return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
+
+	case strings.EqualFold("KMSInternalException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody)
+
+	case strings.EqualFold("KMSInvalidStateException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody)
+
+	case strings.EqualFold("LimitExceededException", errorCode):
+		return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody)
+
+	case strings.EqualFold("NotFoundException", errorCode):
+		return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpUpdateCustomKeyStore struct {
+}
+
+func (*awsAwsjson11_deserializeOpUpdateCustomKeyStore) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpUpdateCustomKeyStore) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorUpdateCustomKeyStore(response, &metadata)
+	}
+	output := &UpdateCustomKeyStoreOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson11_deserializeOpDocumentUpdateCustomKeyStoreOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorUpdateCustomKeyStore(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := 
json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("CloudHsmClusterInvalidConfigurationException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterInvalidConfigurationException(response, errorBody) + + case strings.EqualFold("CloudHsmClusterNotActiveException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterNotActiveException(response, errorBody) + + case strings.EqualFold("CloudHsmClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterNotFoundException(response, errorBody) + + case strings.EqualFold("CloudHsmClusterNotRelatedException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterNotRelatedException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreInvalidStateException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNameInUseException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNameInUseException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("XksProxyIncorrectAuthenticationCredentialException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyIncorrectAuthenticationCredentialException(response, errorBody) + + case strings.EqualFold("XksProxyInvalidConfigurationException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyInvalidConfigurationException(response, errorBody) + + case strings.EqualFold("XksProxyInvalidResponseException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyInvalidResponseException(response, errorBody) + + case strings.EqualFold("XksProxyUriEndpointInUseException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyUriEndpointInUseException(response, errorBody) + + case strings.EqualFold("XksProxyUriInUseException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyUriInUseException(response, errorBody) + + case strings.EqualFold("XksProxyUriUnreachableException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyUriUnreachableException(response, errorBody) + + case strings.EqualFold("XksProxyVpcEndpointServiceInUseException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyVpcEndpointServiceInUseException(response, errorBody) + + case strings.EqualFold("XksProxyVpcEndpointServiceInvalidConfigurationException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyVpcEndpointServiceInvalidConfigurationException(response, errorBody) + + case strings.EqualFold("XksProxyVpcEndpointServiceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyVpcEndpointServiceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: 
errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateKeyDescription struct { +} + +func (*awsAwsjson11_deserializeOpUpdateKeyDescription) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateKeyDescription) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateKeyDescription(response, &metadata) + } + output := &UpdateKeyDescriptionOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateKeyDescription(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdatePrimaryRegion struct { +} + +func (*awsAwsjson11_deserializeOpUpdatePrimaryRegion) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdatePrimaryRegion) HandleDeserialize(ctx 
context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdatePrimaryRegion(response, &metadata) + } + output := &UpdatePrimaryRegionOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdatePrimaryRegion(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpVerify struct { +} + +func (*awsAwsjson11_deserializeOpVerify) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpVerify) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = 
next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorVerify(response, &metadata) + } + output := &VerifyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentVerifyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorVerify(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidSignatureException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidSignatureException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, 
errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpVerifyMac struct { +} + +func (*awsAwsjson11_deserializeOpVerifyMac) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpVerifyMac) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorVerifyMac(response, &metadata) + } + output := &VerifyMacOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentVerifyMacOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorVerifyMac(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case 
strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidMacException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidMacException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsAwsjson11_deserializeErrorAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.AlreadyExistsException{} + err := awsAwsjson11_deserializeDocumentAlreadyExistsException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCloudHsmClusterInUseException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CloudHsmClusterInUseException{} + err := awsAwsjson11_deserializeDocumentCloudHsmClusterInUseException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCloudHsmClusterInvalidConfigurationException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + 
if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CloudHsmClusterInvalidConfigurationException{} + err := awsAwsjson11_deserializeDocumentCloudHsmClusterInvalidConfigurationException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCloudHsmClusterNotActiveException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CloudHsmClusterNotActiveException{} + err := awsAwsjson11_deserializeDocumentCloudHsmClusterNotActiveException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCloudHsmClusterNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CloudHsmClusterNotFoundException{} + err := awsAwsjson11_deserializeDocumentCloudHsmClusterNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCloudHsmClusterNotRelatedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CloudHsmClusterNotRelatedException{} + err := 
awsAwsjson11_deserializeDocumentCloudHsmClusterNotRelatedException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCustomKeyStoreHasCMKsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CustomKeyStoreHasCMKsException{} + err := awsAwsjson11_deserializeDocumentCustomKeyStoreHasCMKsException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCustomKeyStoreInvalidStateException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CustomKeyStoreInvalidStateException{} + err := awsAwsjson11_deserializeDocumentCustomKeyStoreInvalidStateException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCustomKeyStoreNameInUseException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CustomKeyStoreNameInUseException{} + err := awsAwsjson11_deserializeDocumentCustomKeyStoreNameInUseException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func 
awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CustomKeyStoreNotFoundException{} + err := awsAwsjson11_deserializeDocumentCustomKeyStoreNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorDependencyTimeoutException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.DependencyTimeoutException{} + err := awsAwsjson11_deserializeDocumentDependencyTimeoutException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorDisabledException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.DisabledException{} + err := awsAwsjson11_deserializeDocumentDisabledException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorExpiredImportTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + 
Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ExpiredImportTokenException{} + err := awsAwsjson11_deserializeDocumentExpiredImportTokenException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorIncorrectKeyException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.IncorrectKeyException{} + err := awsAwsjson11_deserializeDocumentIncorrectKeyException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorIncorrectKeyMaterialException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.IncorrectKeyMaterialException{} + err := awsAwsjson11_deserializeDocumentIncorrectKeyMaterialException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorIncorrectTrustAnchorException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.IncorrectTrustAnchorException{} + err := awsAwsjson11_deserializeDocumentIncorrectTrustAnchorException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return 
err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidAliasNameException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidAliasNameException{} + err := awsAwsjson11_deserializeDocumentInvalidAliasNameException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidArnException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidArnException{} + err := awsAwsjson11_deserializeDocumentInvalidArnException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidCiphertextException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidCiphertextException{} + err := awsAwsjson11_deserializeDocumentInvalidCiphertextException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidGrantIdException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + 
io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidGrantIdException{} + err := awsAwsjson11_deserializeDocumentInvalidGrantIdException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidGrantTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidGrantTokenException{} + err := awsAwsjson11_deserializeDocumentInvalidGrantTokenException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidImportTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidImportTokenException{} + err := awsAwsjson11_deserializeDocumentInvalidImportTokenException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidKeyUsageException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidKeyUsageException{} + err := awsAwsjson11_deserializeDocumentInvalidKeyUsageException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, 
%w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidMarkerException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidMarkerException{} + err := awsAwsjson11_deserializeDocumentInvalidMarkerException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorKeyUnavailableException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.KeyUnavailableException{} + err := awsAwsjson11_deserializeDocumentKeyUnavailableException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorKMSInternalException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.KMSInternalException{} + err := awsAwsjson11_deserializeDocumentKMSInternalException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorKMSInvalidMacException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var 
snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.KMSInvalidMacException{} + err := awsAwsjson11_deserializeDocumentKMSInvalidMacException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorKMSInvalidSignatureException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.KMSInvalidSignatureException{} + err := awsAwsjson11_deserializeDocumentKMSInvalidSignatureException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorKMSInvalidStateException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.KMSInvalidStateException{} + err := awsAwsjson11_deserializeDocumentKMSInvalidStateException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorLimitExceededException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.LimitExceededException{} + err := awsAwsjson11_deserializeDocumentLimitExceededException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode 
response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorMalformedPolicyDocumentException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.MalformedPolicyDocumentException{} + err := awsAwsjson11_deserializeDocumentMalformedPolicyDocumentException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.NotFoundException{} + err := awsAwsjson11_deserializeDocumentNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorTagException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.TagException{} + err := awsAwsjson11_deserializeDocumentTagException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorUnsupportedOperationException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { 
+ var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.UnsupportedOperationException{} + err := awsAwsjson11_deserializeDocumentUnsupportedOperationException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorXksKeyAlreadyInUseException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.XksKeyAlreadyInUseException{} + err := awsAwsjson11_deserializeDocumentXksKeyAlreadyInUseException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorXksKeyInvalidConfigurationException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.XksKeyInvalidConfigurationException{} + err := awsAwsjson11_deserializeDocumentXksKeyInvalidConfigurationException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorXksKeyNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.XksKeyNotFoundException{} + err := awsAwsjson11_deserializeDocumentXksKeyNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorXksProxyIncorrectAuthenticationCredentialException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.XksProxyIncorrectAuthenticationCredentialException{} + err := awsAwsjson11_deserializeDocumentXksProxyIncorrectAuthenticationCredentialException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorXksProxyInvalidConfigurationException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.XksProxyInvalidConfigurationException{} + err := awsAwsjson11_deserializeDocumentXksProxyInvalidConfigurationException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorXksProxyInvalidResponseException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.XksProxyInvalidResponseException{} + err := awsAwsjson11_deserializeDocumentXksProxyInvalidResponseException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorXksProxyUriEndpointInUseException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff 
[1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.XksProxyUriEndpointInUseException{} + err := awsAwsjson11_deserializeDocumentXksProxyUriEndpointInUseException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorXksProxyUriInUseException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.XksProxyUriInUseException{} + err := awsAwsjson11_deserializeDocumentXksProxyUriInUseException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorXksProxyUriUnreachableException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.XksProxyUriUnreachableException{} + err := awsAwsjson11_deserializeDocumentXksProxyUriUnreachableException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorXksProxyVpcEndpointServiceInUseException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: 
snapshot.Bytes(), + } + return err + } + + output := &types.XksProxyVpcEndpointServiceInUseException{} + err := awsAwsjson11_deserializeDocumentXksProxyVpcEndpointServiceInUseException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorXksProxyVpcEndpointServiceInvalidConfigurationException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.XksProxyVpcEndpointServiceInvalidConfigurationException{} + err := awsAwsjson11_deserializeDocumentXksProxyVpcEndpointServiceInvalidConfigurationException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorXksProxyVpcEndpointServiceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.XksProxyVpcEndpointServiceNotFoundException{} + err := awsAwsjson11_deserializeDocumentXksProxyVpcEndpointServiceNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeDocumentAliasList(v *[]types.AliasListEntry, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.AliasListEntry + if *v == nil { + cv = []types.AliasListEntry{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.AliasListEntry + destAddr := &col + if err := awsAwsjson11_deserializeDocumentAliasListEntry(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentAliasListEntry(v **types.AliasListEntry, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + 
if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AliasListEntry + if *v == nil { + sv = &types.AliasListEntry{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AliasArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ArnType to be of type string, got %T instead", value) + } + sv.AliasArn = ptr.String(jtv) + } + + case "AliasName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AliasNameType to be of type string, got %T instead", value) + } + sv.AliasName = ptr.String(jtv) + } + + case "CreationDate": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreationDate = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + case "LastUpdatedDate": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastUpdatedDate = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + case "TargetKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.TargetKeyId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentAlreadyExistsException(v **types.AlreadyExistsException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AlreadyExistsException + if *v == nil { + sv = &types.AlreadyExistsException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCloudHsmClusterInUseException(v **types.CloudHsmClusterInUseException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CloudHsmClusterInUseException + if *v == nil { + sv = &types.CloudHsmClusterInUseException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCloudHsmClusterInvalidConfigurationException(v **types.CloudHsmClusterInvalidConfigurationException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == 
nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CloudHsmClusterInvalidConfigurationException + if *v == nil { + sv = &types.CloudHsmClusterInvalidConfigurationException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCloudHsmClusterNotActiveException(v **types.CloudHsmClusterNotActiveException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CloudHsmClusterNotActiveException + if *v == nil { + sv = &types.CloudHsmClusterNotActiveException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCloudHsmClusterNotFoundException(v **types.CloudHsmClusterNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CloudHsmClusterNotFoundException + if *v == nil { + sv = &types.CloudHsmClusterNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCloudHsmClusterNotRelatedException(v **types.CloudHsmClusterNotRelatedException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CloudHsmClusterNotRelatedException + if *v == nil { + sv = &types.CloudHsmClusterNotRelatedException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCustomKeyStoreHasCMKsException(v **types.CustomKeyStoreHasCMKsException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv 
*types.CustomKeyStoreHasCMKsException + if *v == nil { + sv = &types.CustomKeyStoreHasCMKsException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCustomKeyStoreInvalidStateException(v **types.CustomKeyStoreInvalidStateException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CustomKeyStoreInvalidStateException + if *v == nil { + sv = &types.CustomKeyStoreInvalidStateException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCustomKeyStoreNameInUseException(v **types.CustomKeyStoreNameInUseException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CustomKeyStoreNameInUseException + if *v == nil { + sv = &types.CustomKeyStoreNameInUseException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCustomKeyStoreNotFoundException(v **types.CustomKeyStoreNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CustomKeyStoreNotFoundException + if *v == nil { + sv = &types.CustomKeyStoreNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCustomKeyStoresList(v *[]types.CustomKeyStoresListEntry, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.CustomKeyStoresListEntry + if *v == nil { + cv = []types.CustomKeyStoresListEntry{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.CustomKeyStoresListEntry + destAddr := &col + if 
err := awsAwsjson11_deserializeDocumentCustomKeyStoresListEntry(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentCustomKeyStoresListEntry(v **types.CustomKeyStoresListEntry, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CustomKeyStoresListEntry + if *v == nil { + sv = &types.CustomKeyStoresListEntry{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CloudHsmClusterId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CloudHsmClusterIdType to be of type string, got %T instead", value) + } + sv.CloudHsmClusterId = ptr.String(jtv) + } + + case "ConnectionErrorCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ConnectionErrorCodeType to be of type string, got %T instead", value) + } + sv.ConnectionErrorCode = types.ConnectionErrorCodeType(jtv) + } + + case "ConnectionState": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ConnectionStateType to be of type string, got %T instead", value) + } + sv.ConnectionState = types.ConnectionStateType(jtv) + } + + case "CreationDate": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreationDate = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + case "CustomKeyStoreId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CustomKeyStoreIdType to be of type string, got %T instead", value) + } + sv.CustomKeyStoreId = ptr.String(jtv) + } + + case "CustomKeyStoreName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CustomKeyStoreNameType to be of type string, got %T instead", value) + } + sv.CustomKeyStoreName = ptr.String(jtv) + } + + case "CustomKeyStoreType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CustomKeyStoreType to be of type string, got %T instead", value) + } + sv.CustomKeyStoreType = types.CustomKeyStoreType(jtv) + } + + case "TrustAnchorCertificate": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TrustAnchorCertificateType to be of type string, got %T instead", value) + } + sv.TrustAnchorCertificate = ptr.String(jtv) + } + + case "XksProxyConfiguration": + if err := awsAwsjson11_deserializeDocumentXksProxyConfigurationType(&sv.XksProxyConfiguration, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentDependencyTimeoutException(v **types.DependencyTimeoutException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DependencyTimeoutException + if *v == nil { + sv = &types.DependencyTimeoutException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": 
+ if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentDisabledException(v **types.DisabledException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DisabledException + if *v == nil { + sv = &types.DisabledException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentEncryptionAlgorithmSpecList(v *[]types.EncryptionAlgorithmSpec, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.EncryptionAlgorithmSpec + if *v == nil { + cv = []types.EncryptionAlgorithmSpec{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.EncryptionAlgorithmSpec + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EncryptionAlgorithmSpec to be of type string, got %T instead", value) + } + col = types.EncryptionAlgorithmSpec(jtv) + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentEncryptionContextType(v *map[string]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var mv map[string]string + if *v == nil { + mv = map[string]string{} + } else { + mv = *v + } + + for key, value := range shape { + var parsedVal string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EncryptionContextValue to be of type string, got %T instead", value) + } + parsedVal = jtv + } + mv[key] = parsedVal + + } + *v = mv + return nil +} + +func awsAwsjson11_deserializeDocumentExpiredImportTokenException(v **types.ExpiredImportTokenException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ExpiredImportTokenException + if *v == nil { + sv = &types.ExpiredImportTokenException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentGrantConstraints(v **types.GrantConstraints, value interface{}) error { + if v == nil { + return 
fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.GrantConstraints + if *v == nil { + sv = &types.GrantConstraints{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "EncryptionContextEquals": + if err := awsAwsjson11_deserializeDocumentEncryptionContextType(&sv.EncryptionContextEquals, value); err != nil { + return err + } + + case "EncryptionContextSubset": + if err := awsAwsjson11_deserializeDocumentEncryptionContextType(&sv.EncryptionContextSubset, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentGrantList(v *[]types.GrantListEntry, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.GrantListEntry + if *v == nil { + cv = []types.GrantListEntry{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.GrantListEntry + destAddr := &col + if err := awsAwsjson11_deserializeDocumentGrantListEntry(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentGrantListEntry(v **types.GrantListEntry, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.GrantListEntry + if *v == nil { + sv = &types.GrantListEntry{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Constraints": + if err := awsAwsjson11_deserializeDocumentGrantConstraints(&sv.Constraints, value); err != nil { + return err + } + + case "CreationDate": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreationDate = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + case "GranteePrincipal": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PrincipalIdType to be of type string, got %T instead", value) + } + sv.GranteePrincipal = ptr.String(jtv) + } + + case "GrantId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GrantIdType to be of type string, got %T instead", value) + } + sv.GrantId = ptr.String(jtv) + } + + case "IssuingAccount": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PrincipalIdType to be of type string, got %T instead", value) + } + sv.IssuingAccount = ptr.String(jtv) + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GrantNameType to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "Operations": + if err := 
awsAwsjson11_deserializeDocumentGrantOperationList(&sv.Operations, value); err != nil { + return err + } + + case "RetiringPrincipal": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PrincipalIdType to be of type string, got %T instead", value) + } + sv.RetiringPrincipal = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentGrantOperationList(v *[]types.GrantOperation, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.GrantOperation + if *v == nil { + cv = []types.GrantOperation{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.GrantOperation + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GrantOperation to be of type string, got %T instead", value) + } + col = types.GrantOperation(jtv) + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentIncorrectKeyException(v **types.IncorrectKeyException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.IncorrectKeyException + if *v == nil { + sv = &types.IncorrectKeyException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentIncorrectKeyMaterialException(v **types.IncorrectKeyMaterialException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.IncorrectKeyMaterialException + if *v == nil { + sv = &types.IncorrectKeyMaterialException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentIncorrectTrustAnchorException(v **types.IncorrectTrustAnchorException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.IncorrectTrustAnchorException + if *v == nil { + sv = &types.IncorrectTrustAnchorException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + 
_, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidAliasNameException(v **types.InvalidAliasNameException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidAliasNameException + if *v == nil { + sv = &types.InvalidAliasNameException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidArnException(v **types.InvalidArnException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidArnException + if *v == nil { + sv = &types.InvalidArnException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidCiphertextException(v **types.InvalidCiphertextException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidCiphertextException + if *v == nil { + sv = &types.InvalidCiphertextException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidGrantIdException(v **types.InvalidGrantIdException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidGrantIdException + if *v == nil { + sv = &types.InvalidGrantIdException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidGrantTokenException(v **types.InvalidGrantTokenException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := 
value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidGrantTokenException + if *v == nil { + sv = &types.InvalidGrantTokenException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidImportTokenException(v **types.InvalidImportTokenException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidImportTokenException + if *v == nil { + sv = &types.InvalidImportTokenException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidKeyUsageException(v **types.InvalidKeyUsageException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidKeyUsageException + if *v == nil { + sv = &types.InvalidKeyUsageException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidMarkerException(v **types.InvalidMarkerException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidMarkerException + if *v == nil { + sv = &types.InvalidMarkerException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentKeyList(v *[]types.KeyListEntry, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.KeyListEntry + if *v == nil { + cv = []types.KeyListEntry{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.KeyListEntry + destAddr := &col + if err := 
awsAwsjson11_deserializeDocumentKeyListEntry(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentKeyListEntry(v **types.KeyListEntry, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KeyListEntry + if *v == nil { + sv = &types.KeyListEntry{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ArnType to be of type string, got %T instead", value) + } + sv.KeyArn = ptr.String(jtv) + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentKeyMetadata(v **types.KeyMetadata, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KeyMetadata + if *v == nil { + sv = &types.KeyMetadata{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Arn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ArnType to be of type string, got %T instead", value) + } + sv.Arn = ptr.String(jtv) + } + + case "AWSAccountId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AWSAccountIdType to be of type string, got %T instead", value) + } + sv.AWSAccountId = ptr.String(jtv) + } + + case "CloudHsmClusterId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CloudHsmClusterIdType to be of type string, got %T instead", value) + } + sv.CloudHsmClusterId = ptr.String(jtv) + } + + case "CreationDate": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreationDate = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + case "CustomerMasterKeySpec": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CustomerMasterKeySpec to be of type string, got %T instead", value) + } + sv.CustomerMasterKeySpec = types.CustomerMasterKeySpec(jtv) + } + + case "CustomKeyStoreId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CustomKeyStoreIdType to be of type string, got %T instead", value) + } + sv.CustomKeyStoreId = ptr.String(jtv) + } + + case "DeletionDate": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.DeletionDate = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + case "Description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DescriptionType to be of type string, got 
%T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "Enabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.Enabled = jtv + } + + case "EncryptionAlgorithms": + if err := awsAwsjson11_deserializeDocumentEncryptionAlgorithmSpecList(&sv.EncryptionAlgorithms, value); err != nil { + return err + } + + case "ExpirationModel": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExpirationModelType to be of type string, got %T instead", value) + } + sv.ExpirationModel = types.ExpirationModelType(jtv) + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "KeyManager": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyManagerType to be of type string, got %T instead", value) + } + sv.KeyManager = types.KeyManagerType(jtv) + } + + case "KeySpec": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeySpec to be of type string, got %T instead", value) + } + sv.KeySpec = types.KeySpec(jtv) + } + + case "KeyState": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyState to be of type string, got %T instead", value) + } + sv.KeyState = types.KeyState(jtv) + } + + case "KeyUsage": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyUsageType to be of type string, got %T instead", value) + } + sv.KeyUsage = types.KeyUsageType(jtv) + } + + case "MacAlgorithms": + if err := awsAwsjson11_deserializeDocumentMacAlgorithmSpecList(&sv.MacAlgorithms, value); err != nil { + return err + } + + case "MultiRegion": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected NullableBooleanType to be of type *bool, got %T instead", value) + } + sv.MultiRegion = ptr.Bool(jtv) + } + + case "MultiRegionConfiguration": + if err := awsAwsjson11_deserializeDocumentMultiRegionConfiguration(&sv.MultiRegionConfiguration, value); err != nil { + return err + } + + case "Origin": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected OriginType to be of type string, got %T instead", value) + } + sv.Origin = types.OriginType(jtv) + } + + case "PendingDeletionWindowInDays": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PendingWindowInDaysType to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.PendingDeletionWindowInDays = ptr.Int32(int32(i64)) + } + + case "SigningAlgorithms": + if err := awsAwsjson11_deserializeDocumentSigningAlgorithmSpecList(&sv.SigningAlgorithms, value); err != nil { + return err + } + + case "ValidTo": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.ValidTo = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + case "XksKeyConfiguration": + if err := awsAwsjson11_deserializeDocumentXksKeyConfigurationType(&sv.XksKeyConfiguration, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
awsAwsjson11_deserializeDocumentKeyUnavailableException(v **types.KeyUnavailableException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KeyUnavailableException + if *v == nil { + sv = &types.KeyUnavailableException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentKMSInternalException(v **types.KMSInternalException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KMSInternalException + if *v == nil { + sv = &types.KMSInternalException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentKMSInvalidMacException(v **types.KMSInvalidMacException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KMSInvalidMacException + if *v == nil { + sv = &types.KMSInvalidMacException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentKMSInvalidSignatureException(v **types.KMSInvalidSignatureException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KMSInvalidSignatureException + if *v == nil { + sv = &types.KMSInvalidSignatureException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentKMSInvalidStateException(v **types.KMSInvalidStateException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type 
%v", value) + } + + var sv *types.KMSInvalidStateException + if *v == nil { + sv = &types.KMSInvalidStateException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentLimitExceededException(v **types.LimitExceededException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.LimitExceededException + if *v == nil { + sv = &types.LimitExceededException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentMacAlgorithmSpecList(v *[]types.MacAlgorithmSpec, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.MacAlgorithmSpec + if *v == nil { + cv = []types.MacAlgorithmSpec{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.MacAlgorithmSpec + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MacAlgorithmSpec to be of type string, got %T instead", value) + } + col = types.MacAlgorithmSpec(jtv) + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentMalformedPolicyDocumentException(v **types.MalformedPolicyDocumentException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MalformedPolicyDocumentException + if *v == nil { + sv = &types.MalformedPolicyDocumentException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentMultiRegionConfiguration(v **types.MultiRegionConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MultiRegionConfiguration + if *v == nil { + sv = &types.MultiRegionConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "MultiRegionKeyType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MultiRegionKeyType to be of 
type string, got %T instead", value) + } + sv.MultiRegionKeyType = types.MultiRegionKeyType(jtv) + } + + case "PrimaryKey": + if err := awsAwsjson11_deserializeDocumentMultiRegionKey(&sv.PrimaryKey, value); err != nil { + return err + } + + case "ReplicaKeys": + if err := awsAwsjson11_deserializeDocumentMultiRegionKeyList(&sv.ReplicaKeys, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentMultiRegionKey(v **types.MultiRegionKey, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MultiRegionKey + if *v == nil { + sv = &types.MultiRegionKey{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Arn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ArnType to be of type string, got %T instead", value) + } + sv.Arn = ptr.String(jtv) + } + + case "Region": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegionType to be of type string, got %T instead", value) + } + sv.Region = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentMultiRegionKeyList(v *[]types.MultiRegionKey, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.MultiRegionKey + if *v == nil { + cv = []types.MultiRegionKey{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.MultiRegionKey + destAddr := &col + if err := awsAwsjson11_deserializeDocumentMultiRegionKey(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentNotFoundException(v **types.NotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.NotFoundException + if *v == nil { + sv = &types.NotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentPolicyNameList(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PolicyNameType to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} 
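// The generated deserializers above all repeat two patterns. Every
// awsAwsjson11_deserializeError* function tees the HTTP error body into a
// small ring buffer while JSON-decoding it, so a snapshot of the raw payload
// can be attached to the *smithy.DeserializationError on failure, then rewinds
// the body for any later handler. Every date field (CreationDate, DeletionDate,
// ValidTo) is decoded as a json.Number of fractional epoch seconds. Below is a
// minimal, self-contained sketch of both patterns using only the standard
// library; bytes.Buffer stands in for smithyio.RingBuffer, notFoundError and
// parseEpochSeconds stand in for the generated types.NotFoundException and
// smithytime.ParseEpochSeconds, and all names in the sketch are illustrative,
// not part of the SDK.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"time"
)

// notFoundError stands in for a generated modeled exception such as
// types.NotFoundException.
type notFoundError struct {
	Message string `json:"message"`
}

func (e *notFoundError) Error() string { return "NotFoundException: " + e.Message }

// deserializeNotFound mirrors the awsAwsjson11_deserializeError* shape
// (simplified to decode straight into the struct): tee the body into a
// snapshot while decoding, report the snapshot on failure, rewind the body,
// and return the typed error on success.
func deserializeNotFound(errorBody *bytes.Reader) error {
	var snapshot bytes.Buffer // simplified stand-in for smithyio.RingBuffer

	decoder := json.NewDecoder(io.TeeReader(errorBody, &snapshot))
	decoder.UseNumber() // keep numbers as json.Number, as the generated code does

	var out notFoundError
	if err := decoder.Decode(&out); err != nil && err != io.EOF {
		return fmt.Errorf("failed to decode response body (snapshot: %q): %w",
			snapshot.Bytes(), err)
	}

	errorBody.Seek(0, io.SeekStart) // leave the body readable for later handlers
	return &out
}

// parseEpochSeconds shows how the CreationDate-style cases turn a json.Number
// of fractional epoch seconds into a time.Time; the generated code delegates
// this to smithytime.ParseEpochSeconds.
func parseEpochSeconds(n json.Number) (time.Time, error) {
	f64, err := n.Float64()
	if err != nil {
		return time.Time{}, err
	}
	sec := int64(f64)
	nsec := int64((f64 - float64(sec)) * float64(time.Second))
	return time.Unix(sec, nsec), nil
}

func main() {
	body := bytes.NewReader([]byte(`{"message":"key not found"}`))
	fmt.Println(deserializeNotFound(body)) // NotFoundException: key not found

	t, _ := parseEpochSeconds(json.Number("1671477246.5"))
	fmt.Println(t.UTC()) // 2022-12-19 19:14:06.5 +0000 UTC
}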
+ +func awsAwsjson11_deserializeDocumentSigningAlgorithmSpecList(v *[]types.SigningAlgorithmSpec, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.SigningAlgorithmSpec + if *v == nil { + cv = []types.SigningAlgorithmSpec{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.SigningAlgorithmSpec + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SigningAlgorithmSpec to be of type string, got %T instead", value) + } + col = types.SigningAlgorithmSpec(jtv) + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentTag(v **types.Tag, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Tag + if *v == nil { + sv = &types.Tag{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "TagKey": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagKeyType to be of type string, got %T instead", value) + } + sv.TagKey = ptr.String(jtv) + } + + case "TagValue": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagValueType to be of type string, got %T instead", value) + } + sv.TagValue = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTagException(v **types.TagException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TagException + if *v == nil { + sv = &types.TagException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTagList(v *[]types.Tag, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Tag + if *v == nil { + cv = []types.Tag{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Tag + destAddr := &col + if err := awsAwsjson11_deserializeDocumentTag(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentUnsupportedOperationException(v **types.UnsupportedOperationException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnsupportedOperationException + if *v == nil { + sv = 
&types.UnsupportedOperationException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksKeyAlreadyInUseException(v **types.XksKeyAlreadyInUseException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksKeyAlreadyInUseException + if *v == nil { + sv = &types.XksKeyAlreadyInUseException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksKeyConfigurationType(v **types.XksKeyConfigurationType, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksKeyConfigurationType + if *v == nil { + sv = &types.XksKeyConfigurationType{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Id": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected XksKeyIdType to be of type string, got %T instead", value) + } + sv.Id = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksKeyInvalidConfigurationException(v **types.XksKeyInvalidConfigurationException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksKeyInvalidConfigurationException + if *v == nil { + sv = &types.XksKeyInvalidConfigurationException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksKeyNotFoundException(v **types.XksKeyNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksKeyNotFoundException + if *v == nil { + sv = &types.XksKeyNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) 
+ } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksProxyConfigurationType(v **types.XksProxyConfigurationType, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksProxyConfigurationType + if *v == nil { + sv = &types.XksProxyConfigurationType{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AccessKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected XksProxyAuthenticationAccessKeyIdType to be of type string, got %T instead", value) + } + sv.AccessKeyId = ptr.String(jtv) + } + + case "Connectivity": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected XksProxyConnectivityType to be of type string, got %T instead", value) + } + sv.Connectivity = types.XksProxyConnectivityType(jtv) + } + + case "UriEndpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected XksProxyUriEndpointType to be of type string, got %T instead", value) + } + sv.UriEndpoint = ptr.String(jtv) + } + + case "UriPath": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected XksProxyUriPathType to be of type string, got %T instead", value) + } + sv.UriPath = ptr.String(jtv) + } + + case "VpcEndpointServiceName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected XksProxyVpcEndpointServiceNameType to be of type string, got %T instead", value) + } + sv.VpcEndpointServiceName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksProxyIncorrectAuthenticationCredentialException(v **types.XksProxyIncorrectAuthenticationCredentialException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksProxyIncorrectAuthenticationCredentialException + if *v == nil { + sv = &types.XksProxyIncorrectAuthenticationCredentialException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksProxyInvalidConfigurationException(v **types.XksProxyInvalidConfigurationException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksProxyInvalidConfigurationException + if *v == nil { + sv = &types.XksProxyInvalidConfigurationException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T 
instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksProxyInvalidResponseException(v **types.XksProxyInvalidResponseException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksProxyInvalidResponseException + if *v == nil { + sv = &types.XksProxyInvalidResponseException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksProxyUriEndpointInUseException(v **types.XksProxyUriEndpointInUseException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksProxyUriEndpointInUseException + if *v == nil { + sv = &types.XksProxyUriEndpointInUseException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksProxyUriInUseException(v **types.XksProxyUriInUseException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksProxyUriInUseException + if *v == nil { + sv = &types.XksProxyUriInUseException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksProxyUriUnreachableException(v **types.XksProxyUriUnreachableException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksProxyUriUnreachableException + if *v == nil { + sv = &types.XksProxyUriUnreachableException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksProxyVpcEndpointServiceInUseException(v 
**types.XksProxyVpcEndpointServiceInUseException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksProxyVpcEndpointServiceInUseException + if *v == nil { + sv = &types.XksProxyVpcEndpointServiceInUseException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksProxyVpcEndpointServiceInvalidConfigurationException(v **types.XksProxyVpcEndpointServiceInvalidConfigurationException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksProxyVpcEndpointServiceInvalidConfigurationException + if *v == nil { + sv = &types.XksProxyVpcEndpointServiceInvalidConfigurationException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksProxyVpcEndpointServiceNotFoundException(v **types.XksProxyVpcEndpointServiceNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksProxyVpcEndpointServiceNotFoundException + if *v == nil { + sv = &types.XksProxyVpcEndpointServiceNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCancelKeyDeletionOutput(v **CancelKeyDeletionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CancelKeyDeletionOutput + if *v == nil { + sv = &CancelKeyDeletionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentConnectCustomKeyStoreOutput(v **ConnectCustomKeyStoreOutput, value interface{}) error { + if v == nil { + return 
fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ConnectCustomKeyStoreOutput + if *v == nil { + sv = &ConnectCustomKeyStoreOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCreateCustomKeyStoreOutput(v **CreateCustomKeyStoreOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateCustomKeyStoreOutput + if *v == nil { + sv = &CreateCustomKeyStoreOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CustomKeyStoreId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CustomKeyStoreIdType to be of type string, got %T instead", value) + } + sv.CustomKeyStoreId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCreateGrantOutput(v **CreateGrantOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateGrantOutput + if *v == nil { + sv = &CreateGrantOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "GrantId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GrantIdType to be of type string, got %T instead", value) + } + sv.GrantId = ptr.String(jtv) + } + + case "GrantToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GrantTokenType to be of type string, got %T instead", value) + } + sv.GrantToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCreateKeyOutput(v **CreateKeyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateKeyOutput + if *v == nil { + sv = &CreateKeyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyMetadata": + if err := awsAwsjson11_deserializeDocumentKeyMetadata(&sv.KeyMetadata, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDecryptOutput(v **DecryptOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DecryptOutput + if *v == nil { + sv = &DecryptOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "EncryptionAlgorithm": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EncryptionAlgorithmSpec to be of type string, 
got %T instead", value) + } + sv.EncryptionAlgorithm = types.EncryptionAlgorithmSpec(jtv) + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "Plaintext": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PlaintextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode PlaintextType, %w", err) + } + sv.Plaintext = dv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteCustomKeyStoreOutput(v **DeleteCustomKeyStoreOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteCustomKeyStoreOutput + if *v == nil { + sv = &DeleteCustomKeyStoreOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDescribeCustomKeyStoresOutput(v **DescribeCustomKeyStoresOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeCustomKeyStoresOutput + if *v == nil { + sv = &DescribeCustomKeyStoresOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CustomKeyStores": + if err := awsAwsjson11_deserializeDocumentCustomKeyStoresList(&sv.CustomKeyStores, value); err != nil { + return err + } + + case "NextMarker": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MarkerType to be of type string, got %T instead", value) + } + sv.NextMarker = ptr.String(jtv) + } + + case "Truncated": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.Truncated = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDescribeKeyOutput(v **DescribeKeyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeKeyOutput + if *v == nil { + sv = &DescribeKeyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyMetadata": + if err := awsAwsjson11_deserializeDocumentKeyMetadata(&sv.KeyMetadata, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDisconnectCustomKeyStoreOutput(v **DisconnectCustomKeyStoreOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DisconnectCustomKeyStoreOutput + if *v == nil 
{ + sv = &DisconnectCustomKeyStoreOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentEncryptOutput(v **EncryptOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *EncryptOutput + if *v == nil { + sv = &EncryptOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CiphertextBlob": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.CiphertextBlob = dv + } + + case "EncryptionAlgorithm": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EncryptionAlgorithmSpec to be of type string, got %T instead", value) + } + sv.EncryptionAlgorithm = types.EncryptionAlgorithmSpec(jtv) + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGenerateDataKeyOutput(v **GenerateDataKeyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GenerateDataKeyOutput + if *v == nil { + sv = &GenerateDataKeyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CiphertextBlob": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.CiphertextBlob = dv + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "Plaintext": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PlaintextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode PlaintextType, %w", err) + } + sv.Plaintext = dv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGenerateDataKeyPairOutput(v **GenerateDataKeyPairOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GenerateDataKeyPairOutput + if *v == nil { + sv = &GenerateDataKeyPairOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyId": + if value != nil { + jtv, ok := 
value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "KeyPairSpec": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataKeyPairSpec to be of type string, got %T instead", value) + } + sv.KeyPairSpec = types.DataKeyPairSpec(jtv) + } + + case "PrivateKeyCiphertextBlob": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.PrivateKeyCiphertextBlob = dv + } + + case "PrivateKeyPlaintext": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PlaintextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode PlaintextType, %w", err) + } + sv.PrivateKeyPlaintext = dv + } + + case "PublicKey": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PublicKeyType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode PublicKeyType, %w", err) + } + sv.PublicKey = dv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGenerateDataKeyPairWithoutPlaintextOutput(v **GenerateDataKeyPairWithoutPlaintextOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GenerateDataKeyPairWithoutPlaintextOutput + if *v == nil { + sv = &GenerateDataKeyPairWithoutPlaintextOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "KeyPairSpec": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataKeyPairSpec to be of type string, got %T instead", value) + } + sv.KeyPairSpec = types.DataKeyPairSpec(jtv) + } + + case "PrivateKeyCiphertextBlob": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.PrivateKeyCiphertextBlob = dv + } + + case "PublicKey": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PublicKeyType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode PublicKeyType, %w", err) + } + sv.PublicKey = dv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGenerateDataKeyWithoutPlaintextOutput(v **GenerateDataKeyWithoutPlaintextOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := 
value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GenerateDataKeyWithoutPlaintextOutput + if *v == nil { + sv = &GenerateDataKeyWithoutPlaintextOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CiphertextBlob": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.CiphertextBlob = dv + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGenerateMacOutput(v **GenerateMacOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GenerateMacOutput + if *v == nil { + sv = &GenerateMacOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "Mac": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.Mac = dv + } + + case "MacAlgorithm": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MacAlgorithmSpec to be of type string, got %T instead", value) + } + sv.MacAlgorithm = types.MacAlgorithmSpec(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGenerateRandomOutput(v **GenerateRandomOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GenerateRandomOutput + if *v == nil { + sv = &GenerateRandomOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Plaintext": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PlaintextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode PlaintextType, %w", err) + } + sv.Plaintext = dv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetKeyPolicyOutput(v **GetKeyPolicyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetKeyPolicyOutput + if *v == nil { + sv = &GetKeyPolicyOutput{} + } else { + sv = *v + } + 
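+	// (Editorial comment, not generated.) Each top-level member of the response
+	// document is type-asserted against its modeled wire type before being
+	// assigned; unrecognized members fall through to the empty default case.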
+ for key, value := range shape { + switch key { + case "Policy": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PolicyType to be of type string, got %T instead", value) + } + sv.Policy = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetKeyRotationStatusOutput(v **GetKeyRotationStatusOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetKeyRotationStatusOutput + if *v == nil { + sv = &GetKeyRotationStatusOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyRotationEnabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.KeyRotationEnabled = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetParametersForImportOutput(v **GetParametersForImportOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetParametersForImportOutput + if *v == nil { + sv = &GetParametersForImportOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ImportToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.ImportToken = dv + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "ParametersValidTo": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.ParametersValidTo = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + case "PublicKey": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PlaintextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode PlaintextType, %w", err) + } + sv.PublicKey = dv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetPublicKeyOutput(v **GetPublicKeyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetPublicKeyOutput + if *v == nil { + sv = &GetPublicKeyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CustomerMasterKeySpec": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected 
CustomerMasterKeySpec to be of type string, got %T instead", value) + } + sv.CustomerMasterKeySpec = types.CustomerMasterKeySpec(jtv) + } + + case "EncryptionAlgorithms": + if err := awsAwsjson11_deserializeDocumentEncryptionAlgorithmSpecList(&sv.EncryptionAlgorithms, value); err != nil { + return err + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "KeySpec": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeySpec to be of type string, got %T instead", value) + } + sv.KeySpec = types.KeySpec(jtv) + } + + case "KeyUsage": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyUsageType to be of type string, got %T instead", value) + } + sv.KeyUsage = types.KeyUsageType(jtv) + } + + case "PublicKey": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PublicKeyType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode PublicKeyType, %w", err) + } + sv.PublicKey = dv + } + + case "SigningAlgorithms": + if err := awsAwsjson11_deserializeDocumentSigningAlgorithmSpecList(&sv.SigningAlgorithms, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentImportKeyMaterialOutput(v **ImportKeyMaterialOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ImportKeyMaterialOutput + if *v == nil { + sv = &ImportKeyMaterialOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListAliasesOutput(v **ListAliasesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListAliasesOutput + if *v == nil { + sv = &ListAliasesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Aliases": + if err := awsAwsjson11_deserializeDocumentAliasList(&sv.Aliases, value); err != nil { + return err + } + + case "NextMarker": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MarkerType to be of type string, got %T instead", value) + } + sv.NextMarker = ptr.String(jtv) + } + + case "Truncated": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.Truncated = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListGrantsOutput(v **ListGrantsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListGrantsOutput + if *v == nil { + sv = 
&ListGrantsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Grants": + if err := awsAwsjson11_deserializeDocumentGrantList(&sv.Grants, value); err != nil { + return err + } + + case "NextMarker": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MarkerType to be of type string, got %T instead", value) + } + sv.NextMarker = ptr.String(jtv) + } + + case "Truncated": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.Truncated = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListKeyPoliciesOutput(v **ListKeyPoliciesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListKeyPoliciesOutput + if *v == nil { + sv = &ListKeyPoliciesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextMarker": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MarkerType to be of type string, got %T instead", value) + } + sv.NextMarker = ptr.String(jtv) + } + + case "PolicyNames": + if err := awsAwsjson11_deserializeDocumentPolicyNameList(&sv.PolicyNames, value); err != nil { + return err + } + + case "Truncated": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.Truncated = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListKeysOutput(v **ListKeysOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListKeysOutput + if *v == nil { + sv = &ListKeysOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Keys": + if err := awsAwsjson11_deserializeDocumentKeyList(&sv.Keys, value); err != nil { + return err + } + + case "NextMarker": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MarkerType to be of type string, got %T instead", value) + } + sv.NextMarker = ptr.String(jtv) + } + + case "Truncated": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.Truncated = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListResourceTagsOutput(v **ListResourceTagsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListResourceTagsOutput + if *v == nil { + sv = &ListResourceTagsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextMarker": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MarkerType to be of type string, got %T instead", value) + } + 
sv.NextMarker = ptr.String(jtv) + } + + case "Tags": + if err := awsAwsjson11_deserializeDocumentTagList(&sv.Tags, value); err != nil { + return err + } + + case "Truncated": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.Truncated = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListRetirableGrantsOutput(v **ListRetirableGrantsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListRetirableGrantsOutput + if *v == nil { + sv = &ListRetirableGrantsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Grants": + if err := awsAwsjson11_deserializeDocumentGrantList(&sv.Grants, value); err != nil { + return err + } + + case "NextMarker": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MarkerType to be of type string, got %T instead", value) + } + sv.NextMarker = ptr.String(jtv) + } + + case "Truncated": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.Truncated = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentReEncryptOutput(v **ReEncryptOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ReEncryptOutput + if *v == nil { + sv = &ReEncryptOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CiphertextBlob": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.CiphertextBlob = dv + } + + case "DestinationEncryptionAlgorithm": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EncryptionAlgorithmSpec to be of type string, got %T instead", value) + } + sv.DestinationEncryptionAlgorithm = types.EncryptionAlgorithmSpec(jtv) + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "SourceEncryptionAlgorithm": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EncryptionAlgorithmSpec to be of type string, got %T instead", value) + } + sv.SourceEncryptionAlgorithm = types.EncryptionAlgorithmSpec(jtv) + } + + case "SourceKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.SourceKeyId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentReplicateKeyOutput(v **ReplicateKeyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil 
of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ReplicateKeyOutput + if *v == nil { + sv = &ReplicateKeyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ReplicaKeyMetadata": + if err := awsAwsjson11_deserializeDocumentKeyMetadata(&sv.ReplicaKeyMetadata, value); err != nil { + return err + } + + case "ReplicaPolicy": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PolicyType to be of type string, got %T instead", value) + } + sv.ReplicaPolicy = ptr.String(jtv) + } + + case "ReplicaTags": + if err := awsAwsjson11_deserializeDocumentTagList(&sv.ReplicaTags, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentScheduleKeyDeletionOutput(v **ScheduleKeyDeletionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ScheduleKeyDeletionOutput + if *v == nil { + sv = &ScheduleKeyDeletionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "DeletionDate": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.DeletionDate = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "KeyState": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyState to be of type string, got %T instead", value) + } + sv.KeyState = types.KeyState(jtv) + } + + case "PendingWindowInDays": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PendingWindowInDaysType to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.PendingWindowInDays = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentSignOutput(v **SignOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *SignOutput + if *v == nil { + sv = &SignOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "Signature": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.Signature = dv + } + + case "SigningAlgorithm": + if 
value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SigningAlgorithmSpec to be of type string, got %T instead", value) + } + sv.SigningAlgorithm = types.SigningAlgorithmSpec(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdateCustomKeyStoreOutput(v **UpdateCustomKeyStoreOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateCustomKeyStoreOutput + if *v == nil { + sv = &UpdateCustomKeyStoreOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentVerifyMacOutput(v **VerifyMacOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *VerifyMacOutput + if *v == nil { + sv = &VerifyMacOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "MacAlgorithm": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MacAlgorithmSpec to be of type string, got %T instead", value) + } + sv.MacAlgorithm = types.MacAlgorithmSpec(jtv) + } + + case "MacValid": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.MacValid = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentVerifyOutput(v **VerifyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *VerifyOutput + if *v == nil { + sv = &VerifyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "SignatureValid": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.SignatureValid = jtv + } + + case "SigningAlgorithm": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SigningAlgorithmSpec to be of type string, got %T instead", value) + } + sv.SigningAlgorithm = types.SigningAlgorithmSpec(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/doc.go new file mode 100644 index 00000000000..499e4889c6f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/doc.go @@ -0,0 +1,78 @@ +// Code generated by 
smithy-go-codegen DO NOT EDIT. + +// Package kms provides the API client, operations, and parameter types for AWS Key +// Management Service. +// +// Key Management Service Key Management Service (KMS) is an encryption and key +// management web service. This guide describes the KMS operations that you can +// call programmatically. For general information about KMS, see the Key +// Management Service Developer Guide +// (https://docs.aws.amazon.com/kms/latest/developerguide/). KMS has replaced the +// term customer master key (CMK) with KMS key and KMS key. The concept has not +// changed. To prevent breaking changes, KMS is keeping some variations of this +// term. Amazon Web Services provides SDKs that consist of libraries and sample +// code for various programming languages and platforms (Java, Ruby, .Net, macOS, +// Android, etc.). The SDKs provide a convenient way to create programmatic access +// to KMS and other Amazon Web Services services. For example, the SDKs take care +// of tasks such as signing requests (see below), managing errors, and retrying +// requests automatically. For more information about the Amazon Web Services SDKs, +// including how to download and install them, see Tools for Amazon Web Services +// (http://aws.amazon.com/tools/). We recommend that you use the Amazon Web +// Services SDKs to make programmatic API calls to KMS. If you need to use FIPS +// 140-2 validated cryptographic modules when communicating with Amazon Web +// Services, use the FIPS endpoint in your preferred Amazon Web Services Region. +// For more information about the available FIPS endpoints, see Service endpoints +// (https://docs.aws.amazon.com/general/latest/gr/kms.html#kms_region) in the Key +// Management Service topic of the Amazon Web Services General Reference. All KMS +// API calls must be signed and be transmitted using Transport Layer Security +// (TLS). KMS recommends you always use the latest supported TLS version. Clients +// must also support cipher suites with Perfect Forward Secrecy (PFS) such as +// Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral Diffie-Hellman +// (ECDHE). Most modern systems such as Java 7 and later support these modes. +// Signing Requests Requests must be signed by using an access key ID and a secret +// access key. We strongly recommend that you do not use your Amazon Web Services +// account (root) access key ID and secret access key for everyday work with KMS. +// Instead, use the access key ID and secret access key for an IAM user. You can +// also use the Amazon Web Services Security Token Service to generate temporary +// security credentials that you can use to sign requests. All KMS operations +// require Signature Version 4 +// (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// Logging API Requests KMS supports CloudTrail, a service that logs Amazon Web +// Services API calls and related events for your Amazon Web Services account and +// delivers them to an Amazon S3 bucket that you specify. By using the information +// collected by CloudTrail, you can determine what requests were made to KMS, who +// made the request, when it was made, and so on. To learn more about CloudTrail, +// including how to turn it on and find your log files, see the CloudTrail User +// Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/). 
Additional +// Resources For more information about credentials and request signing, see the +// following: +// +// * Amazon Web Services Security Credentials +// (https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) - +// This topic provides general information about the types of credentials used to +// access Amazon Web Services. +// +// * Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html) - +// This section of the IAM User Guide describes how to create and use temporary +// security credentials. +// +// * Signature Version 4 Signing Process +// (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) - This +// set of topics walks you through the process of signing a request using an access +// key ID and a secret access key. +// +// Commonly Used API Operations Of the API +// operations discussed in this guide, the following will prove the most useful for +// most applications. You will likely perform operations other than these, such as +// creating keys and assigning policies, by using the console. +// +// * Encrypt +// +// * +// Decrypt +// +// * GenerateDataKey +// +// * GenerateDataKeyWithoutPlaintext +package kms diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go new file mode 100644 index 00000000000..43b65e982c4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go @@ -0,0 +1,200 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/url" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. +type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +func resolveDefaultEndpointConfiguration(o *Options) { + if o.EndpointResolver != nil { + return + } + o.EndpointResolver = NewDefaultEndpointResolver() +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint url. 
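+//
+// For instance, a sketch of pointing the client at a custom endpoint (the
+// URL and region below are hypothetical values):
+//
+//	resolver := kms.EndpointResolverFromURL("https://kms.example.test")
+//	client := kms.New(kms.Options{
+//		Region:           "us-east-1",
+//		EndpointResolver: resolver,
+//	})
+//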
By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom. You can provide functional options to configure endpoint +// values for the resolved endpoint. +func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "kms" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions + resolver EndpointResolver +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + if w.awsResolver == nil { + goto fallback + } + endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region, options) + if err == nil { + return endpoint, nil + } + + if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) { + return endpoint, err + } + +fallback: + if w.resolver == nil { + return endpoint, fmt.Errorf("default endpoint resolver provided was nil") + } + return w.resolver.ResolveEndpoint(region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _
aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver. +// If awsResolver returns an aws.EndpointNotFoundError, the resolver will use the provided +// fallbackResolver for resolution. +// +// fallbackResolver must not be nil. +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions, fallbackResolver EndpointResolver) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + resolver: fallbackResolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/generated.json new file mode 100644 index 00000000000..68a551b7b63 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/generated.json @@ -0,0 +1,77 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/smithy-go": "v1.4.0" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_CancelKeyDeletion.go", + "api_op_ConnectCustomKeyStore.go", + "api_op_CreateAlias.go", + "api_op_CreateCustomKeyStore.go", + "api_op_CreateGrant.go", + "api_op_CreateKey.go", + "api_op_Decrypt.go", + "api_op_DeleteAlias.go", + "api_op_DeleteCustomKeyStore.go", + "api_op_DeleteImportedKeyMaterial.go", + "api_op_DescribeCustomKeyStores.go", + "api_op_DescribeKey.go", + "api_op_DisableKey.go", + "api_op_DisableKeyRotation.go", + "api_op_DisconnectCustomKeyStore.go", + "api_op_EnableKey.go", + "api_op_EnableKeyRotation.go", + "api_op_Encrypt.go", + "api_op_GenerateDataKey.go", + "api_op_GenerateDataKeyPair.go", + "api_op_GenerateDataKeyPairWithoutPlaintext.go", + "api_op_GenerateDataKeyWithoutPlaintext.go", + "api_op_GenerateMac.go", + "api_op_GenerateRandom.go", + "api_op_GetKeyPolicy.go", + "api_op_GetKeyRotationStatus.go", + "api_op_GetParametersForImport.go", + "api_op_GetPublicKey.go", + "api_op_ImportKeyMaterial.go", + "api_op_ListAliases.go", + "api_op_ListGrants.go", + "api_op_ListKeyPolicies.go", + "api_op_ListKeys.go", + "api_op_ListResourceTags.go", + "api_op_ListRetirableGrants.go", + "api_op_PutKeyPolicy.go", + "api_op_ReEncrypt.go", + "api_op_ReplicateKey.go", + "api_op_RetireGrant.go", + "api_op_RevokeGrant.go", + "api_op_ScheduleKeyDeletion.go", +
"api_op_Sign.go", + "api_op_TagResource.go", + "api_op_UntagResource.go", + "api_op_UpdateAlias.go", + "api_op_UpdateCustomKeyStore.go", + "api_op_UpdateKeyDescription.go", + "api_op_UpdatePrimaryRegion.go", + "api_op_Verify.go", + "api_op_VerifyMac.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "protocol_test.go", + "serializers.go", + "types/enums.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.15", + "module": "github.com/aws/aws-sdk-go-v2/service/kms", + "unstable": false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go new file mode 100644 index 00000000000..781099e7b49 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package kms + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.19.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go new file mode 100644 index 00000000000..2868876ac1c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go @@ -0,0 +1,860 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package endpoints + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" + "github.com/aws/smithy-go/logging" + "regexp" +) + +// Options is the endpoint resolver configuration options +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the + // provided logger. + LogDeprecated bool + + // ResolvedRegion is used to override the region to be resolved, rather then the + // using the value passed to the ResolveEndpoint method. This value is used by the + // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative + // name. You must not set this value directly in your application. + ResolvedRegion string + + // DisableHTTPS informs the resolver to return an endpoint that does not use the + // HTTPS scheme. + DisableHTTPS bool + + // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. + UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. 
+ UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver KMS endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "kms.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "kms-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "kms.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "ProdFips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-central-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "af-south-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.af-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "af-south-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.af-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "af-south-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: 
"kms-fips.ap-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-northeast-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-northeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-northeast-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-northeast-2-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-northeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-northeast-3.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-northeast-3-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-northeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-3", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-south-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-south-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-south-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-south-2-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-south-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-southeast-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-southeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-southeast-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-2-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-southeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: 
"ap-southeast-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-southeast-3.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-3-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-southeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-3", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-central-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ca-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ca-central-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ca-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-central-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.eu-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-central-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.eu-central-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-central-2-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-central-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-north-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.eu-north-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-north-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-north-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-north-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.eu-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-south-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.eu-south-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-south-2-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-south-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.eu-west-1.amazonaws.com", + }, + 
endpoints.EndpointKey{ + Region: "eu-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.eu-west-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-west-2-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-3", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.eu-west-3.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-west-3-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-west-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-3", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "me-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-central-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.me-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "me-central-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.me-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "me-central-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-south-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.me-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "me-south-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.me-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "me-south-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "sa-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.sa-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "sa-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.sa-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "sa-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-east-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-2-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-1", + 
Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-west-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-2-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "kms.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "kms-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "kms.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "kms.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "ProdFips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-iso-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-iso-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + }, + endpoints.EndpointKey{ + Region: "us-iso-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-iso-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-iso-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov", + }, + endpoints.EndpointKey{ + Region: "us-iso-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, + { + ID: "aws-iso-b", + Defaults: 
map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "kms.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "ProdFips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-isob-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-isob-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpoints.EndpointKey{ + Region: "us-isob-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "kms.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "kms-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "kms.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "ProdFips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-gov-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/serializers.go new file mode 100644 index 00000000000..39d8aaa2cee --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/serializers.go @@ -0,0 +1,4148 @@ 
+// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + smithyjson "github.com/aws/smithy-go/encoding/json" + "github.com/aws/smithy-go/middleware" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + "path" +) + +type awsAwsjson11_serializeOpCancelKeyDeletion struct { +} + +func (*awsAwsjson11_serializeOpCancelKeyDeletion) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCancelKeyDeletion) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CancelKeyDeletionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.CancelKeyDeletion") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCancelKeyDeletionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpConnectCustomKeyStore struct { +} + +func (*awsAwsjson11_serializeOpConnectCustomKeyStore) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpConnectCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ConnectCustomKeyStoreInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = 
path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ConnectCustomKeyStore") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentConnectCustomKeyStoreInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateAlias struct { +} + +func (*awsAwsjson11_serializeOpCreateAlias) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateAlias) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateAliasInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.CreateAlias") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateAliasInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateCustomKeyStore struct { +} + +func (*awsAwsjson11_serializeOpCreateCustomKeyStore) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next 
middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateCustomKeyStoreInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.CreateCustomKeyStore") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateCustomKeyStoreInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateGrant struct { +} + +func (*awsAwsjson11_serializeOpCreateGrant) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateGrant) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateGrantInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.CreateGrant") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateGrantInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = 
request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateKey struct { +} + +func (*awsAwsjson11_serializeOpCreateKey) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateKeyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.CreateKey") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateKeyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDecrypt struct { +} + +func (*awsAwsjson11_serializeOpDecrypt) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDecrypt) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DecryptInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := 
httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.Decrypt") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDecryptInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteAlias struct { +} + +func (*awsAwsjson11_serializeOpDeleteAlias) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteAlias) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteAliasInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DeleteAlias") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteAliasInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteCustomKeyStore struct { +} + +func (*awsAwsjson11_serializeOpDeleteCustomKeyStore) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + 
input, ok := in.Parameters.(*DeleteCustomKeyStoreInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DeleteCustomKeyStore") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteCustomKeyStoreInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteImportedKeyMaterial struct { +} + +func (*awsAwsjson11_serializeOpDeleteImportedKeyMaterial) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteImportedKeyMaterial) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteImportedKeyMaterialInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DeleteImportedKeyMaterial") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteImportedKeyMaterialInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, 
metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribeCustomKeyStores struct { +} + +func (*awsAwsjson11_serializeOpDescribeCustomKeyStores) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeCustomKeyStores) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeCustomKeyStoresInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DescribeCustomKeyStores") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeCustomKeyStoresInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribeKey struct { +} + +func (*awsAwsjson11_serializeOpDescribeKey) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeKeyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } 
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DescribeKey")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentDescribeKeyInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpDisableKey struct {
+}
+
+func (*awsAwsjson11_serializeOpDisableKey) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpDisableKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*DisableKeyInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DisableKey")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentDisableKeyInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpDisableKeyRotation struct {
+}
+
+func (*awsAwsjson11_serializeOpDisableKeyRotation) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpDisableKeyRotation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*DisableKeyRotationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DisableKeyRotation")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentDisableKeyRotationInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpDisconnectCustomKeyStore struct {
+}
+
+func (*awsAwsjson11_serializeOpDisconnectCustomKeyStore) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpDisconnectCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*DisconnectCustomKeyStoreInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DisconnectCustomKeyStore")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentDisconnectCustomKeyStoreInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpEnableKey struct {
+}
+
+func (*awsAwsjson11_serializeOpEnableKey) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpEnableKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*EnableKeyInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.EnableKey")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentEnableKeyInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpEnableKeyRotation struct {
+}
+
+func (*awsAwsjson11_serializeOpEnableKeyRotation) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpEnableKeyRotation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*EnableKeyRotationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.EnableKeyRotation")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentEnableKeyRotationInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpEncrypt struct {
+}
+
+func (*awsAwsjson11_serializeOpEncrypt) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpEncrypt) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*EncryptInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.Encrypt")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentEncryptInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpGenerateDataKey struct {
+}
+
+func (*awsAwsjson11_serializeOpGenerateDataKey) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpGenerateDataKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GenerateDataKeyInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GenerateDataKey")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentGenerateDataKeyInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpGenerateDataKeyPair struct {
+}
+
+func (*awsAwsjson11_serializeOpGenerateDataKeyPair) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpGenerateDataKeyPair) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GenerateDataKeyPairInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GenerateDataKeyPair")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentGenerateDataKeyPairInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpGenerateDataKeyPairWithoutPlaintext struct {
+}
+
+func (*awsAwsjson11_serializeOpGenerateDataKeyPairWithoutPlaintext) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpGenerateDataKeyPairWithoutPlaintext) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GenerateDataKeyPairWithoutPlaintextInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GenerateDataKeyPairWithoutPlaintext")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentGenerateDataKeyPairWithoutPlaintextInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpGenerateDataKeyWithoutPlaintext struct {
+}
+
+func (*awsAwsjson11_serializeOpGenerateDataKeyWithoutPlaintext) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpGenerateDataKeyWithoutPlaintext) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GenerateDataKeyWithoutPlaintextInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GenerateDataKeyWithoutPlaintext")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentGenerateDataKeyWithoutPlaintextInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpGenerateMac struct {
+}
+
+func (*awsAwsjson11_serializeOpGenerateMac) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpGenerateMac) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GenerateMacInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GenerateMac")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentGenerateMacInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpGenerateRandom struct {
+}
+
+func (*awsAwsjson11_serializeOpGenerateRandom) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpGenerateRandom) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GenerateRandomInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GenerateRandom")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentGenerateRandomInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpGetKeyPolicy struct {
+}
+
+func (*awsAwsjson11_serializeOpGetKeyPolicy) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpGetKeyPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetKeyPolicyInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GetKeyPolicy")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentGetKeyPolicyInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpGetKeyRotationStatus struct {
+}
+
+func (*awsAwsjson11_serializeOpGetKeyRotationStatus) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpGetKeyRotationStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetKeyRotationStatusInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GetKeyRotationStatus")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentGetKeyRotationStatusInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpGetParametersForImport struct {
+}
+
+func (*awsAwsjson11_serializeOpGetParametersForImport) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpGetParametersForImport) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetParametersForImportInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GetParametersForImport")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentGetParametersForImportInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpGetPublicKey struct {
+}
+
+func (*awsAwsjson11_serializeOpGetPublicKey) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpGetPublicKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetPublicKeyInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GetPublicKey")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentGetPublicKeyInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpImportKeyMaterial struct {
+}
+
+func (*awsAwsjson11_serializeOpImportKeyMaterial) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpImportKeyMaterial) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ImportKeyMaterialInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ImportKeyMaterial")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentImportKeyMaterialInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpListAliases struct {
+}
+
+func (*awsAwsjson11_serializeOpListAliases) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpListAliases) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListAliasesInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ListAliases")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentListAliasesInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpListGrants struct {
+}
+
+func (*awsAwsjson11_serializeOpListGrants) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpListGrants) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListGrantsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ListGrants")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentListGrantsInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpListKeyPolicies struct {
+}
+
+func (*awsAwsjson11_serializeOpListKeyPolicies) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpListKeyPolicies) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListKeyPoliciesInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ListKeyPolicies")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentListKeyPoliciesInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpListKeys struct {
+}
+
+func (*awsAwsjson11_serializeOpListKeys) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpListKeys) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListKeysInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ListKeys")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentListKeysInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpListResourceTags struct {
+}
+
+func (*awsAwsjson11_serializeOpListResourceTags) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpListResourceTags) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListResourceTagsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ListResourceTags")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentListResourceTagsInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpListRetirableGrants struct {
+}
+
+func (*awsAwsjson11_serializeOpListRetirableGrants) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpListRetirableGrants) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListRetirableGrantsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ListRetirableGrants")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentListRetirableGrantsInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpPutKeyPolicy struct {
+}
+
+func (*awsAwsjson11_serializeOpPutKeyPolicy) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpPutKeyPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutKeyPolicyInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.PutKeyPolicy")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentPutKeyPolicyInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpReEncrypt struct {
+}
+
+func (*awsAwsjson11_serializeOpReEncrypt) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpReEncrypt) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ReEncryptInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ReEncrypt")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentReEncryptInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpReplicateKey struct {
+}
+
+func (*awsAwsjson11_serializeOpReplicateKey) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpReplicateKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ReplicateKeyInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ReplicateKey")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentReplicateKeyInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpRetireGrant struct {
+}
+
+func (*awsAwsjson11_serializeOpRetireGrant) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpRetireGrant) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*RetireGrantInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.RetireGrant")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentRetireGrantInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpRevokeGrant struct {
+}
+
+func (*awsAwsjson11_serializeOpRevokeGrant) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpRevokeGrant) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*RevokeGrantInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.RevokeGrant")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentRevokeGrantInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpScheduleKeyDeletion struct {
+}
+
+func (*awsAwsjson11_serializeOpScheduleKeyDeletion) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpScheduleKeyDeletion) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ScheduleKeyDeletionInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ScheduleKeyDeletion")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentScheduleKeyDeletionInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpSign struct {
+}
+
+func (*awsAwsjson11_serializeOpSign) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpSign) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*SignInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.Sign")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentSignInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpTagResource struct {
+}
+
+func (*awsAwsjson11_serializeOpTagResource) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpTagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*TagResourceInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.TagResource")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentTagResourceInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson11_serializeOpUntagResource struct {
+}
+
+func (*awsAwsjson11_serializeOpUntagResource) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson11_serializeOpUntagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*UntagResourceInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.UntagResource")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson11_serializeOpDocumentUntagResourceInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return
next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdateAlias struct { +} + +func (*awsAwsjson11_serializeOpUpdateAlias) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateAlias) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateAliasInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.UpdateAlias") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateAliasInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdateCustomKeyStore struct { +} + +func (*awsAwsjson11_serializeOpUpdateCustomKeyStore) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateCustomKeyStoreInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + 
httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.UpdateCustomKeyStore") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateCustomKeyStoreInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdateKeyDescription struct { +} + +func (*awsAwsjson11_serializeOpUpdateKeyDescription) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateKeyDescription) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateKeyDescriptionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.UpdateKeyDescription") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateKeyDescriptionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdatePrimaryRegion struct { +} + +func (*awsAwsjson11_serializeOpUpdatePrimaryRegion) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdatePrimaryRegion) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdatePrimaryRegionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters 
type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.UpdatePrimaryRegion") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdatePrimaryRegionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpVerify struct { +} + +func (*awsAwsjson11_serializeOpVerify) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpVerify) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*VerifyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.Verify") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentVerifyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpVerifyMac struct { +} + +func (*awsAwsjson11_serializeOpVerifyMac) ID() string { + return "OperationSerializer" +} + +func (m 
*awsAwsjson11_serializeOpVerifyMac) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*VerifyMacInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.VerifyMac") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentVerifyMacInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsAwsjson11_serializeDocumentEncryptionContextType(v map[string]string, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + om.String(v[key]) + } + return nil +} + +func awsAwsjson11_serializeDocumentGrantConstraints(v *types.GrantConstraints, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EncryptionContextEquals != nil { + ok := object.Key("EncryptionContextEquals") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContextEquals, ok); err != nil { + return err + } + } + + if v.EncryptionContextSubset != nil { + ok := object.Key("EncryptionContextSubset") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContextSubset, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentGrantOperationList(v []types.GrantOperation, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(string(v[i])) + } + return nil +} + +func awsAwsjson11_serializeDocumentGrantTokenList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentTag(v *types.Tag, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.TagKey != nil { + ok := object.Key("TagKey") + ok.String(*v.TagKey) + } + + if v.TagValue != nil { + ok := object.Key("TagValue") + ok.String(*v.TagValue) + } + + return 
nil +} + +func awsAwsjson11_serializeDocumentTagKeyList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentTagList(v []types.Tag, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentTag(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentXksProxyAuthenticationCredentialType(v *types.XksProxyAuthenticationCredentialType, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AccessKeyId != nil { + ok := object.Key("AccessKeyId") + ok.String(*v.AccessKeyId) + } + + if v.RawSecretAccessKey != nil { + ok := object.Key("RawSecretAccessKey") + ok.String(*v.RawSecretAccessKey) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCancelKeyDeletionInput(v *CancelKeyDeletionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentConnectCustomKeyStoreInput(v *ConnectCustomKeyStoreInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CustomKeyStoreId != nil { + ok := object.Key("CustomKeyStoreId") + ok.String(*v.CustomKeyStoreId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateAliasInput(v *CreateAliasInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AliasName != nil { + ok := object.Key("AliasName") + ok.String(*v.AliasName) + } + + if v.TargetKeyId != nil { + ok := object.Key("TargetKeyId") + ok.String(*v.TargetKeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateCustomKeyStoreInput(v *CreateCustomKeyStoreInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CloudHsmClusterId != nil { + ok := object.Key("CloudHsmClusterId") + ok.String(*v.CloudHsmClusterId) + } + + if v.CustomKeyStoreName != nil { + ok := object.Key("CustomKeyStoreName") + ok.String(*v.CustomKeyStoreName) + } + + if len(v.CustomKeyStoreType) > 0 { + ok := object.Key("CustomKeyStoreType") + ok.String(string(v.CustomKeyStoreType)) + } + + if v.KeyStorePassword != nil { + ok := object.Key("KeyStorePassword") + ok.String(*v.KeyStorePassword) + } + + if v.TrustAnchorCertificate != nil { + ok := object.Key("TrustAnchorCertificate") + ok.String(*v.TrustAnchorCertificate) + } + + if v.XksProxyAuthenticationCredential != nil { + ok := object.Key("XksProxyAuthenticationCredential") + if err := awsAwsjson11_serializeDocumentXksProxyAuthenticationCredentialType(v.XksProxyAuthenticationCredential, ok); err != nil { + return err + } + } + + if len(v.XksProxyConnectivity) > 0 { + ok := object.Key("XksProxyConnectivity") + ok.String(string(v.XksProxyConnectivity)) + } + + if v.XksProxyUriEndpoint != nil { + ok := object.Key("XksProxyUriEndpoint") + ok.String(*v.XksProxyUriEndpoint) + } + + if v.XksProxyUriPath != nil { + ok := object.Key("XksProxyUriPath") + ok.String(*v.XksProxyUriPath) + } + + if v.XksProxyVpcEndpointServiceName != nil { + ok := object.Key("XksProxyVpcEndpointServiceName") + ok.String(*v.XksProxyVpcEndpointServiceName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateGrantInput(v *CreateGrantInput, value 
smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Constraints != nil { + ok := object.Key("Constraints") + if err := awsAwsjson11_serializeDocumentGrantConstraints(v.Constraints, ok); err != nil { + return err + } + } + + if v.GranteePrincipal != nil { + ok := object.Key("GranteePrincipal") + ok.String(*v.GranteePrincipal) + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + if v.Operations != nil { + ok := object.Key("Operations") + if err := awsAwsjson11_serializeDocumentGrantOperationList(v.Operations, ok); err != nil { + return err + } + } + + if v.RetiringPrincipal != nil { + ok := object.Key("RetiringPrincipal") + ok.String(*v.RetiringPrincipal) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateKeyInput(v *CreateKeyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.BypassPolicyLockoutSafetyCheck { + ok := object.Key("BypassPolicyLockoutSafetyCheck") + ok.Boolean(v.BypassPolicyLockoutSafetyCheck) + } + + if len(v.CustomerMasterKeySpec) > 0 { + ok := object.Key("CustomerMasterKeySpec") + ok.String(string(v.CustomerMasterKeySpec)) + } + + if v.CustomKeyStoreId != nil { + ok := object.Key("CustomKeyStoreId") + ok.String(*v.CustomKeyStoreId) + } + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if len(v.KeySpec) > 0 { + ok := object.Key("KeySpec") + ok.String(string(v.KeySpec)) + } + + if len(v.KeyUsage) > 0 { + ok := object.Key("KeyUsage") + ok.String(string(v.KeyUsage)) + } + + if v.MultiRegion != nil { + ok := object.Key("MultiRegion") + ok.Boolean(*v.MultiRegion) + } + + if len(v.Origin) > 0 { + ok := object.Key("Origin") + ok.String(string(v.Origin)) + } + + if v.Policy != nil { + ok := object.Key("Policy") + ok.String(*v.Policy) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + if v.XksKeyId != nil { + ok := object.Key("XksKeyId") + ok.String(*v.XksKeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDecryptInput(v *DecryptInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CiphertextBlob != nil { + ok := object.Key("CiphertextBlob") + ok.Base64EncodeBytes(v.CiphertextBlob) + } + + if len(v.EncryptionAlgorithm) > 0 { + ok := object.Key("EncryptionAlgorithm") + ok.String(string(v.EncryptionAlgorithm)) + } + + if v.EncryptionContext != nil { + ok := object.Key("EncryptionContext") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContext, ok); err != nil { + return err + } + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteAliasInput(v *DeleteAliasInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AliasName != nil { + ok := object.Key("AliasName") + ok.String(*v.AliasName) + } + + return nil +} + +func 
awsAwsjson11_serializeOpDocumentDeleteCustomKeyStoreInput(v *DeleteCustomKeyStoreInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CustomKeyStoreId != nil { + ok := object.Key("CustomKeyStoreId") + ok.String(*v.CustomKeyStoreId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteImportedKeyMaterialInput(v *DeleteImportedKeyMaterialInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeCustomKeyStoresInput(v *DescribeCustomKeyStoresInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CustomKeyStoreId != nil { + ok := object.Key("CustomKeyStoreId") + ok.String(*v.CustomKeyStoreId) + } + + if v.CustomKeyStoreName != nil { + ok := object.Key("CustomKeyStoreName") + ok.String(*v.CustomKeyStoreName) + } + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.Marker != nil { + ok := object.Key("Marker") + ok.String(*v.Marker) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeKeyInput(v *DescribeKeyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDisableKeyInput(v *DisableKeyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDisableKeyRotationInput(v *DisableKeyRotationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDisconnectCustomKeyStoreInput(v *DisconnectCustomKeyStoreInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CustomKeyStoreId != nil { + ok := object.Key("CustomKeyStoreId") + ok.String(*v.CustomKeyStoreId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentEnableKeyInput(v *EnableKeyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentEnableKeyRotationInput(v *EnableKeyRotationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentEncryptInput(v *EncryptInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.EncryptionAlgorithm) > 0 { + ok := object.Key("EncryptionAlgorithm") + ok.String(string(v.EncryptionAlgorithm)) + } + + if v.EncryptionContext != nil { + ok := object.Key("EncryptionContext") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContext, ok); err != nil { + return err + } + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := 
awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Plaintext != nil { + ok := object.Key("Plaintext") + ok.Base64EncodeBytes(v.Plaintext) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGenerateDataKeyInput(v *GenerateDataKeyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EncryptionContext != nil { + ok := object.Key("EncryptionContext") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContext, ok); err != nil { + return err + } + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if len(v.KeySpec) > 0 { + ok := object.Key("KeySpec") + ok.String(string(v.KeySpec)) + } + + if v.NumberOfBytes != nil { + ok := object.Key("NumberOfBytes") + ok.Integer(*v.NumberOfBytes) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGenerateDataKeyPairInput(v *GenerateDataKeyPairInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EncryptionContext != nil { + ok := object.Key("EncryptionContext") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContext, ok); err != nil { + return err + } + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if len(v.KeyPairSpec) > 0 { + ok := object.Key("KeyPairSpec") + ok.String(string(v.KeyPairSpec)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGenerateDataKeyPairWithoutPlaintextInput(v *GenerateDataKeyPairWithoutPlaintextInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EncryptionContext != nil { + ok := object.Key("EncryptionContext") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContext, ok); err != nil { + return err + } + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if len(v.KeyPairSpec) > 0 { + ok := object.Key("KeyPairSpec") + ok.String(string(v.KeyPairSpec)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGenerateDataKeyWithoutPlaintextInput(v *GenerateDataKeyWithoutPlaintextInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EncryptionContext != nil { + ok := object.Key("EncryptionContext") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContext, ok); err != nil { + return err + } + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if len(v.KeySpec) > 0 { + ok := object.Key("KeySpec") + ok.String(string(v.KeySpec)) + } + + if v.NumberOfBytes != nil { + ok := object.Key("NumberOfBytes") + ok.Integer(*v.NumberOfBytes) + } + + return nil +} + +func 
awsAwsjson11_serializeOpDocumentGenerateMacInput(v *GenerateMacInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if len(v.MacAlgorithm) > 0 { + ok := object.Key("MacAlgorithm") + ok.String(string(v.MacAlgorithm)) + } + + if v.Message != nil { + ok := object.Key("Message") + ok.Base64EncodeBytes(v.Message) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGenerateRandomInput(v *GenerateRandomInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CustomKeyStoreId != nil { + ok := object.Key("CustomKeyStoreId") + ok.String(*v.CustomKeyStoreId) + } + + if v.NumberOfBytes != nil { + ok := object.Key("NumberOfBytes") + ok.Integer(*v.NumberOfBytes) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetKeyPolicyInput(v *GetKeyPolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.PolicyName != nil { + ok := object.Key("PolicyName") + ok.String(*v.PolicyName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetKeyRotationStatusInput(v *GetKeyRotationStatusInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetParametersForImportInput(v *GetParametersForImportInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if len(v.WrappingAlgorithm) > 0 { + ok := object.Key("WrappingAlgorithm") + ok.String(string(v.WrappingAlgorithm)) + } + + if len(v.WrappingKeySpec) > 0 { + ok := object.Key("WrappingKeySpec") + ok.String(string(v.WrappingKeySpec)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetPublicKeyInput(v *GetPublicKeyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentImportKeyMaterialInput(v *ImportKeyMaterialInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EncryptedKeyMaterial != nil { + ok := object.Key("EncryptedKeyMaterial") + ok.Base64EncodeBytes(v.EncryptedKeyMaterial) + } + + if len(v.ExpirationModel) > 0 { + ok := object.Key("ExpirationModel") + ok.String(string(v.ExpirationModel)) + } + + if v.ImportToken != nil { + ok := object.Key("ImportToken") + ok.Base64EncodeBytes(v.ImportToken) + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.ValidTo != nil { + ok := object.Key("ValidTo") + ok.Double(smithytime.FormatEpochSeconds(*v.ValidTo)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListAliasesInput(v *ListAliasesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + 
ok.String(*v.KeyId) + } + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.Marker != nil { + ok := object.Key("Marker") + ok.String(*v.Marker) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListGrantsInput(v *ListGrantsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GranteePrincipal != nil { + ok := object.Key("GranteePrincipal") + ok.String(*v.GranteePrincipal) + } + + if v.GrantId != nil { + ok := object.Key("GrantId") + ok.String(*v.GrantId) + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.Marker != nil { + ok := object.Key("Marker") + ok.String(*v.Marker) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListKeyPoliciesInput(v *ListKeyPoliciesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.Marker != nil { + ok := object.Key("Marker") + ok.String(*v.Marker) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListKeysInput(v *ListKeysInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.Marker != nil { + ok := object.Key("Marker") + ok.String(*v.Marker) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListResourceTagsInput(v *ListResourceTagsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.Marker != nil { + ok := object.Key("Marker") + ok.String(*v.Marker) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListRetirableGrantsInput(v *ListRetirableGrantsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.Marker != nil { + ok := object.Key("Marker") + ok.String(*v.Marker) + } + + if v.RetiringPrincipal != nil { + ok := object.Key("RetiringPrincipal") + ok.String(*v.RetiringPrincipal) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentPutKeyPolicyInput(v *PutKeyPolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.BypassPolicyLockoutSafetyCheck { + ok := object.Key("BypassPolicyLockoutSafetyCheck") + ok.Boolean(v.BypassPolicyLockoutSafetyCheck) + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Policy != nil { + ok := object.Key("Policy") + ok.String(*v.Policy) + } + + if v.PolicyName != nil { + ok := object.Key("PolicyName") + ok.String(*v.PolicyName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentReEncryptInput(v *ReEncryptInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CiphertextBlob != nil { + ok := object.Key("CiphertextBlob") + ok.Base64EncodeBytes(v.CiphertextBlob) + } + + if len(v.DestinationEncryptionAlgorithm) > 0 { + ok := object.Key("DestinationEncryptionAlgorithm") + ok.String(string(v.DestinationEncryptionAlgorithm)) + } + + if v.DestinationEncryptionContext != nil { + ok := 
object.Key("DestinationEncryptionContext") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.DestinationEncryptionContext, ok); err != nil { + return err + } + } + + if v.DestinationKeyId != nil { + ok := object.Key("DestinationKeyId") + ok.String(*v.DestinationKeyId) + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if len(v.SourceEncryptionAlgorithm) > 0 { + ok := object.Key("SourceEncryptionAlgorithm") + ok.String(string(v.SourceEncryptionAlgorithm)) + } + + if v.SourceEncryptionContext != nil { + ok := object.Key("SourceEncryptionContext") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.SourceEncryptionContext, ok); err != nil { + return err + } + } + + if v.SourceKeyId != nil { + ok := object.Key("SourceKeyId") + ok.String(*v.SourceKeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentReplicateKeyInput(v *ReplicateKeyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.BypassPolicyLockoutSafetyCheck { + ok := object.Key("BypassPolicyLockoutSafetyCheck") + ok.Boolean(v.BypassPolicyLockoutSafetyCheck) + } + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Policy != nil { + ok := object.Key("Policy") + ok.String(*v.Policy) + } + + if v.ReplicaRegion != nil { + ok := object.Key("ReplicaRegion") + ok.String(*v.ReplicaRegion) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentRetireGrantInput(v *RetireGrantInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GrantId != nil { + ok := object.Key("GrantId") + ok.String(*v.GrantId) + } + + if v.GrantToken != nil { + ok := object.Key("GrantToken") + ok.String(*v.GrantToken) + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentRevokeGrantInput(v *RevokeGrantInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GrantId != nil { + ok := object.Key("GrantId") + ok.String(*v.GrantId) + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentScheduleKeyDeletionInput(v *ScheduleKeyDeletionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.PendingWindowInDays != nil { + ok := object.Key("PendingWindowInDays") + ok.Integer(*v.PendingWindowInDays) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentSignInput(v *SignInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Message != nil { + ok := object.Key("Message") + ok.Base64EncodeBytes(v.Message) + } + + if len(v.MessageType) > 0 { + ok := object.Key("MessageType") + ok.String(string(v.MessageType)) + } + + if 
len(v.SigningAlgorithm) > 0 { + ok := object.Key("SigningAlgorithm") + ok.String(string(v.SigningAlgorithm)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentTagResourceInput(v *TagResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUntagResourceInput(v *UntagResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.TagKeys != nil { + ok := object.Key("TagKeys") + if err := awsAwsjson11_serializeDocumentTagKeyList(v.TagKeys, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateAliasInput(v *UpdateAliasInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AliasName != nil { + ok := object.Key("AliasName") + ok.String(*v.AliasName) + } + + if v.TargetKeyId != nil { + ok := object.Key("TargetKeyId") + ok.String(*v.TargetKeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateCustomKeyStoreInput(v *UpdateCustomKeyStoreInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CloudHsmClusterId != nil { + ok := object.Key("CloudHsmClusterId") + ok.String(*v.CloudHsmClusterId) + } + + if v.CustomKeyStoreId != nil { + ok := object.Key("CustomKeyStoreId") + ok.String(*v.CustomKeyStoreId) + } + + if v.KeyStorePassword != nil { + ok := object.Key("KeyStorePassword") + ok.String(*v.KeyStorePassword) + } + + if v.NewCustomKeyStoreName != nil { + ok := object.Key("NewCustomKeyStoreName") + ok.String(*v.NewCustomKeyStoreName) + } + + if v.XksProxyAuthenticationCredential != nil { + ok := object.Key("XksProxyAuthenticationCredential") + if err := awsAwsjson11_serializeDocumentXksProxyAuthenticationCredentialType(v.XksProxyAuthenticationCredential, ok); err != nil { + return err + } + } + + if len(v.XksProxyConnectivity) > 0 { + ok := object.Key("XksProxyConnectivity") + ok.String(string(v.XksProxyConnectivity)) + } + + if v.XksProxyUriEndpoint != nil { + ok := object.Key("XksProxyUriEndpoint") + ok.String(*v.XksProxyUriEndpoint) + } + + if v.XksProxyUriPath != nil { + ok := object.Key("XksProxyUriPath") + ok.String(*v.XksProxyUriPath) + } + + if v.XksProxyVpcEndpointServiceName != nil { + ok := object.Key("XksProxyVpcEndpointServiceName") + ok.String(*v.XksProxyVpcEndpointServiceName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateKeyDescriptionInput(v *UpdateKeyDescriptionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdatePrimaryRegionInput(v *UpdatePrimaryRegionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.PrimaryRegion != nil { + ok := object.Key("PrimaryRegion") + ok.String(*v.PrimaryRegion) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentVerifyInput(v 
*VerifyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Message != nil { + ok := object.Key("Message") + ok.Base64EncodeBytes(v.Message) + } + + if len(v.MessageType) > 0 { + ok := object.Key("MessageType") + ok.String(string(v.MessageType)) + } + + if v.Signature != nil { + ok := object.Key("Signature") + ok.Base64EncodeBytes(v.Signature) + } + + if len(v.SigningAlgorithm) > 0 { + ok := object.Key("SigningAlgorithm") + ok.String(string(v.SigningAlgorithm)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentVerifyMacInput(v *VerifyMacInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Mac != nil { + ok := object.Key("Mac") + ok.Base64EncodeBytes(v.Mac) + } + + if len(v.MacAlgorithm) > 0 { + ok := object.Key("MacAlgorithm") + ok.String(string(v.MacAlgorithm)) + } + + if v.Message != nil { + ok := object.Key("Message") + ok.Base64EncodeBytes(v.Message) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/enums.go new file mode 100644 index 00000000000..923e3abc0f9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/enums.go @@ -0,0 +1,545 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +type AlgorithmSpec string + +// Enum values for AlgorithmSpec +const ( + AlgorithmSpecRsaesPkcs1V15 AlgorithmSpec = "RSAES_PKCS1_V1_5" + AlgorithmSpecRsaesOaepSha1 AlgorithmSpec = "RSAES_OAEP_SHA_1" + AlgorithmSpecRsaesOaepSha256 AlgorithmSpec = "RSAES_OAEP_SHA_256" +) + +// Values returns all known values for AlgorithmSpec. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
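+//
+// A hypothetical caller-side sketch (not part of the generated code): since
+// the enum is an open string type, compare against the named constants and
+// tolerate values this client does not yet know about:
+//
+//	switch spec {
+//	case types.AlgorithmSpecRsaesOaepSha256, types.AlgorithmSpecRsaesOaepSha1:
+//		// wrapping algorithm known to this client
+//	default:
+//		// possibly a value introduced after this client was generated
+//	}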
+func (AlgorithmSpec) Values() []AlgorithmSpec { + return []AlgorithmSpec{ + "RSAES_PKCS1_V1_5", + "RSAES_OAEP_SHA_1", + "RSAES_OAEP_SHA_256", + } +} + +type ConnectionErrorCodeType string + +// Enum values for ConnectionErrorCodeType +const ( + ConnectionErrorCodeTypeInvalidCredentials ConnectionErrorCodeType = "INVALID_CREDENTIALS" + ConnectionErrorCodeTypeClusterNotFound ConnectionErrorCodeType = "CLUSTER_NOT_FOUND" + ConnectionErrorCodeTypeNetworkErrors ConnectionErrorCodeType = "NETWORK_ERRORS" + ConnectionErrorCodeTypeInternalError ConnectionErrorCodeType = "INTERNAL_ERROR" + ConnectionErrorCodeTypeInsufficientCloudhsmHsms ConnectionErrorCodeType = "INSUFFICIENT_CLOUDHSM_HSMS" + ConnectionErrorCodeTypeUserLockedOut ConnectionErrorCodeType = "USER_LOCKED_OUT" + ConnectionErrorCodeTypeUserNotFound ConnectionErrorCodeType = "USER_NOT_FOUND" + ConnectionErrorCodeTypeUserLoggedIn ConnectionErrorCodeType = "USER_LOGGED_IN" + ConnectionErrorCodeTypeSubnetNotFound ConnectionErrorCodeType = "SUBNET_NOT_FOUND" + ConnectionErrorCodeTypeInsufficientFreeAddressesInSubnet ConnectionErrorCodeType = "INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET" + ConnectionErrorCodeTypeXksProxyAccessDenied ConnectionErrorCodeType = "XKS_PROXY_ACCESS_DENIED" + ConnectionErrorCodeTypeXksProxyNotReachable ConnectionErrorCodeType = "XKS_PROXY_NOT_REACHABLE" + ConnectionErrorCodeTypeXksVpcEndpointServiceNotFound ConnectionErrorCodeType = "XKS_VPC_ENDPOINT_SERVICE_NOT_FOUND" + ConnectionErrorCodeTypeXksProxyInvalidResponse ConnectionErrorCodeType = "XKS_PROXY_INVALID_RESPONSE" + ConnectionErrorCodeTypeXksProxyInvalidConfiguration ConnectionErrorCodeType = "XKS_PROXY_INVALID_CONFIGURATION" + ConnectionErrorCodeTypeXksVpcEndpointServiceInvalidConfiguration ConnectionErrorCodeType = "XKS_VPC_ENDPOINT_SERVICE_INVALID_CONFIGURATION" + ConnectionErrorCodeTypeXksProxyTimedOut ConnectionErrorCodeType = "XKS_PROXY_TIMED_OUT" + ConnectionErrorCodeTypeXksProxyInvalidTlsConfiguration ConnectionErrorCodeType = "XKS_PROXY_INVALID_TLS_CONFIGURATION" +) + +// Values returns all known values for ConnectionErrorCodeType. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ConnectionErrorCodeType) Values() []ConnectionErrorCodeType { + return []ConnectionErrorCodeType{ + "INVALID_CREDENTIALS", + "CLUSTER_NOT_FOUND", + "NETWORK_ERRORS", + "INTERNAL_ERROR", + "INSUFFICIENT_CLOUDHSM_HSMS", + "USER_LOCKED_OUT", + "USER_NOT_FOUND", + "USER_LOGGED_IN", + "SUBNET_NOT_FOUND", + "INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET", + "XKS_PROXY_ACCESS_DENIED", + "XKS_PROXY_NOT_REACHABLE", + "XKS_VPC_ENDPOINT_SERVICE_NOT_FOUND", + "XKS_PROXY_INVALID_RESPONSE", + "XKS_PROXY_INVALID_CONFIGURATION", + "XKS_VPC_ENDPOINT_SERVICE_INVALID_CONFIGURATION", + "XKS_PROXY_TIMED_OUT", + "XKS_PROXY_INVALID_TLS_CONFIGURATION", + } +} + +type ConnectionStateType string + +// Enum values for ConnectionStateType +const ( + ConnectionStateTypeConnected ConnectionStateType = "CONNECTED" + ConnectionStateTypeConnecting ConnectionStateType = "CONNECTING" + ConnectionStateTypeFailed ConnectionStateType = "FAILED" + ConnectionStateTypeDisconnected ConnectionStateType = "DISCONNECTED" + ConnectionStateTypeDisconnecting ConnectionStateType = "DISCONNECTING" +) + +// Values returns all known values for ConnectionStateType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. 
The +// ordering of this slice is not guaranteed to be stable across updates. +func (ConnectionStateType) Values() []ConnectionStateType { + return []ConnectionStateType{ + "CONNECTED", + "CONNECTING", + "FAILED", + "DISCONNECTED", + "DISCONNECTING", + } +} + +type CustomerMasterKeySpec string + +// Enum values for CustomerMasterKeySpec +const ( + CustomerMasterKeySpecRsa2048 CustomerMasterKeySpec = "RSA_2048" + CustomerMasterKeySpecRsa3072 CustomerMasterKeySpec = "RSA_3072" + CustomerMasterKeySpecRsa4096 CustomerMasterKeySpec = "RSA_4096" + CustomerMasterKeySpecEccNistP256 CustomerMasterKeySpec = "ECC_NIST_P256" + CustomerMasterKeySpecEccNistP384 CustomerMasterKeySpec = "ECC_NIST_P384" + CustomerMasterKeySpecEccNistP521 CustomerMasterKeySpec = "ECC_NIST_P521" + CustomerMasterKeySpecEccSecgP256k1 CustomerMasterKeySpec = "ECC_SECG_P256K1" + CustomerMasterKeySpecSymmetricDefault CustomerMasterKeySpec = "SYMMETRIC_DEFAULT" + CustomerMasterKeySpecHmac224 CustomerMasterKeySpec = "HMAC_224" + CustomerMasterKeySpecHmac256 CustomerMasterKeySpec = "HMAC_256" + CustomerMasterKeySpecHmac384 CustomerMasterKeySpec = "HMAC_384" + CustomerMasterKeySpecHmac512 CustomerMasterKeySpec = "HMAC_512" + CustomerMasterKeySpecSm2 CustomerMasterKeySpec = "SM2" +) + +// Values returns all known values for CustomerMasterKeySpec. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (CustomerMasterKeySpec) Values() []CustomerMasterKeySpec { + return []CustomerMasterKeySpec{ + "RSA_2048", + "RSA_3072", + "RSA_4096", + "ECC_NIST_P256", + "ECC_NIST_P384", + "ECC_NIST_P521", + "ECC_SECG_P256K1", + "SYMMETRIC_DEFAULT", + "HMAC_224", + "HMAC_256", + "HMAC_384", + "HMAC_512", + "SM2", + } +} + +type CustomKeyStoreType string + +// Enum values for CustomKeyStoreType +const ( + CustomKeyStoreTypeAwsCloudhsm CustomKeyStoreType = "AWS_CLOUDHSM" + CustomKeyStoreTypeExternalKeyStore CustomKeyStoreType = "EXTERNAL_KEY_STORE" +) + +// Values returns all known values for CustomKeyStoreType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (CustomKeyStoreType) Values() []CustomKeyStoreType { + return []CustomKeyStoreType{ + "AWS_CLOUDHSM", + "EXTERNAL_KEY_STORE", + } +} + +type DataKeyPairSpec string + +// Enum values for DataKeyPairSpec +const ( + DataKeyPairSpecRsa2048 DataKeyPairSpec = "RSA_2048" + DataKeyPairSpecRsa3072 DataKeyPairSpec = "RSA_3072" + DataKeyPairSpecRsa4096 DataKeyPairSpec = "RSA_4096" + DataKeyPairSpecEccNistP256 DataKeyPairSpec = "ECC_NIST_P256" + DataKeyPairSpecEccNistP384 DataKeyPairSpec = "ECC_NIST_P384" + DataKeyPairSpecEccNistP521 DataKeyPairSpec = "ECC_NIST_P521" + DataKeyPairSpecEccSecgP256k1 DataKeyPairSpec = "ECC_SECG_P256K1" + DataKeyPairSpecSm2 DataKeyPairSpec = "SM2" +) + +// Values returns all known values for DataKeyPairSpec. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
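+//
+// Because the ordering is unstable, a caller that needs a deterministic list
+// could sort the returned slice first (hypothetical sketch):
+//
+//	specs := types.DataKeyPairSpec("").Values()
+//	sort.Slice(specs, func(i, j int) bool { return specs[i] < specs[j] })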
+func (DataKeyPairSpec) Values() []DataKeyPairSpec { + return []DataKeyPairSpec{ + "RSA_2048", + "RSA_3072", + "RSA_4096", + "ECC_NIST_P256", + "ECC_NIST_P384", + "ECC_NIST_P521", + "ECC_SECG_P256K1", + "SM2", + } +} + +type DataKeySpec string + +// Enum values for DataKeySpec +const ( + DataKeySpecAes256 DataKeySpec = "AES_256" + DataKeySpecAes128 DataKeySpec = "AES_128" +) + +// Values returns all known values for DataKeySpec. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (DataKeySpec) Values() []DataKeySpec { + return []DataKeySpec{ + "AES_256", + "AES_128", + } +} + +type EncryptionAlgorithmSpec string + +// Enum values for EncryptionAlgorithmSpec +const ( + EncryptionAlgorithmSpecSymmetricDefault EncryptionAlgorithmSpec = "SYMMETRIC_DEFAULT" + EncryptionAlgorithmSpecRsaesOaepSha1 EncryptionAlgorithmSpec = "RSAES_OAEP_SHA_1" + EncryptionAlgorithmSpecRsaesOaepSha256 EncryptionAlgorithmSpec = "RSAES_OAEP_SHA_256" + EncryptionAlgorithmSpecSm2pke EncryptionAlgorithmSpec = "SM2PKE" +) + +// Values returns all known values for EncryptionAlgorithmSpec. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (EncryptionAlgorithmSpec) Values() []EncryptionAlgorithmSpec { + return []EncryptionAlgorithmSpec{ + "SYMMETRIC_DEFAULT", + "RSAES_OAEP_SHA_1", + "RSAES_OAEP_SHA_256", + "SM2PKE", + } +} + +type ExpirationModelType string + +// Enum values for ExpirationModelType +const ( + ExpirationModelTypeKeyMaterialExpires ExpirationModelType = "KEY_MATERIAL_EXPIRES" + ExpirationModelTypeKeyMaterialDoesNotExpire ExpirationModelType = "KEY_MATERIAL_DOES_NOT_EXPIRE" +) + +// Values returns all known values for ExpirationModelType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ExpirationModelType) Values() []ExpirationModelType { + return []ExpirationModelType{ + "KEY_MATERIAL_EXPIRES", + "KEY_MATERIAL_DOES_NOT_EXPIRE", + } +} + +type GrantOperation string + +// Enum values for GrantOperation +const ( + GrantOperationDecrypt GrantOperation = "Decrypt" + GrantOperationEncrypt GrantOperation = "Encrypt" + GrantOperationGenerateDataKey GrantOperation = "GenerateDataKey" + GrantOperationGenerateDataKeyWithoutPlaintext GrantOperation = "GenerateDataKeyWithoutPlaintext" + GrantOperationReEncryptFrom GrantOperation = "ReEncryptFrom" + GrantOperationReEncryptTo GrantOperation = "ReEncryptTo" + GrantOperationSign GrantOperation = "Sign" + GrantOperationVerify GrantOperation = "Verify" + GrantOperationGetPublicKey GrantOperation = "GetPublicKey" + GrantOperationCreateGrant GrantOperation = "CreateGrant" + GrantOperationRetireGrant GrantOperation = "RetireGrant" + GrantOperationDescribeKey GrantOperation = "DescribeKey" + GrantOperationGenerateDataKeyPair GrantOperation = "GenerateDataKeyPair" + GrantOperationGenerateDataKeyPairWithoutPlaintext GrantOperation = "GenerateDataKeyPairWithoutPlaintext" + GrantOperationGenerateMac GrantOperation = "GenerateMac" + GrantOperationVerifyMac GrantOperation = "VerifyMac" +) + +// Values returns all known values for GrantOperation. Note that this can be +// expanded in the future, and so it is only as up to date as the client. 
The +// ordering of this slice is not guaranteed to be stable across updates. +func (GrantOperation) Values() []GrantOperation { + return []GrantOperation{ + "Decrypt", + "Encrypt", + "GenerateDataKey", + "GenerateDataKeyWithoutPlaintext", + "ReEncryptFrom", + "ReEncryptTo", + "Sign", + "Verify", + "GetPublicKey", + "CreateGrant", + "RetireGrant", + "DescribeKey", + "GenerateDataKeyPair", + "GenerateDataKeyPairWithoutPlaintext", + "GenerateMac", + "VerifyMac", + } +} + +type KeyManagerType string + +// Enum values for KeyManagerType +const ( + KeyManagerTypeAws KeyManagerType = "AWS" + KeyManagerTypeCustomer KeyManagerType = "CUSTOMER" +) + +// Values returns all known values for KeyManagerType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (KeyManagerType) Values() []KeyManagerType { + return []KeyManagerType{ + "AWS", + "CUSTOMER", + } +} + +type KeySpec string + +// Enum values for KeySpec +const ( + KeySpecRsa2048 KeySpec = "RSA_2048" + KeySpecRsa3072 KeySpec = "RSA_3072" + KeySpecRsa4096 KeySpec = "RSA_4096" + KeySpecEccNistP256 KeySpec = "ECC_NIST_P256" + KeySpecEccNistP384 KeySpec = "ECC_NIST_P384" + KeySpecEccNistP521 KeySpec = "ECC_NIST_P521" + KeySpecEccSecgP256k1 KeySpec = "ECC_SECG_P256K1" + KeySpecSymmetricDefault KeySpec = "SYMMETRIC_DEFAULT" + KeySpecHmac224 KeySpec = "HMAC_224" + KeySpecHmac256 KeySpec = "HMAC_256" + KeySpecHmac384 KeySpec = "HMAC_384" + KeySpecHmac512 KeySpec = "HMAC_512" + KeySpecSm2 KeySpec = "SM2" +) + +// Values returns all known values for KeySpec. Note that this can be expanded in +// the future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (KeySpec) Values() []KeySpec { + return []KeySpec{ + "RSA_2048", + "RSA_3072", + "RSA_4096", + "ECC_NIST_P256", + "ECC_NIST_P384", + "ECC_NIST_P521", + "ECC_SECG_P256K1", + "SYMMETRIC_DEFAULT", + "HMAC_224", + "HMAC_256", + "HMAC_384", + "HMAC_512", + "SM2", + } +} + +type KeyState string + +// Enum values for KeyState +const ( + KeyStateCreating KeyState = "Creating" + KeyStateEnabled KeyState = "Enabled" + KeyStateDisabled KeyState = "Disabled" + KeyStatePendingDeletion KeyState = "PendingDeletion" + KeyStatePendingImport KeyState = "PendingImport" + KeyStatePendingReplicaDeletion KeyState = "PendingReplicaDeletion" + KeyStateUnavailable KeyState = "Unavailable" + KeyStateUpdating KeyState = "Updating" +) + +// Values returns all known values for KeyState. Note that this can be expanded in +// the future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (KeyState) Values() []KeyState { + return []KeyState{ + "Creating", + "Enabled", + "Disabled", + "PendingDeletion", + "PendingImport", + "PendingReplicaDeletion", + "Unavailable", + "Updating", + } +} + +type KeyUsageType string + +// Enum values for KeyUsageType +const ( + KeyUsageTypeSignVerify KeyUsageType = "SIGN_VERIFY" + KeyUsageTypeEncryptDecrypt KeyUsageType = "ENCRYPT_DECRYPT" + KeyUsageTypeGenerateVerifyMac KeyUsageType = "GENERATE_VERIFY_MAC" +) + +// Values returns all known values for KeyUsageType. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. 
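These generated Values() methods are the hook for client-side validation of enum strings. A minimal sketch of such a check, using the vendored import path this patch introduces; the helper name and package are illustrative, not part of the SDK:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// isKnownKeyUsage reports whether s is a KeyUsageType value known to this
// client version. Values() is only as current as the vendored SDK, so the
// service may accept values that this check rejects.
func isKnownKeyUsage(s string) bool {
	for _, u := range types.KeyUsageType("").Values() {
		if string(u) == s {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isKnownKeyUsage("SIGN_VERIFY")) // true
	fmt.Println(isKnownKeyUsage("SIGN_ONLY"))   // false: not a known value
}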
+func (KeyUsageType) Values() []KeyUsageType { + return []KeyUsageType{ + "SIGN_VERIFY", + "ENCRYPT_DECRYPT", + "GENERATE_VERIFY_MAC", + } +} + +type MacAlgorithmSpec string + +// Enum values for MacAlgorithmSpec +const ( + MacAlgorithmSpecHmacSha224 MacAlgorithmSpec = "HMAC_SHA_224" + MacAlgorithmSpecHmacSha256 MacAlgorithmSpec = "HMAC_SHA_256" + MacAlgorithmSpecHmacSha384 MacAlgorithmSpec = "HMAC_SHA_384" + MacAlgorithmSpecHmacSha512 MacAlgorithmSpec = "HMAC_SHA_512" +) + +// Values returns all known values for MacAlgorithmSpec. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (MacAlgorithmSpec) Values() []MacAlgorithmSpec { + return []MacAlgorithmSpec{ + "HMAC_SHA_224", + "HMAC_SHA_256", + "HMAC_SHA_384", + "HMAC_SHA_512", + } +} + +type MessageType string + +// Enum values for MessageType +const ( + MessageTypeRaw MessageType = "RAW" + MessageTypeDigest MessageType = "DIGEST" +) + +// Values returns all known values for MessageType. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (MessageType) Values() []MessageType { + return []MessageType{ + "RAW", + "DIGEST", + } +} + +type MultiRegionKeyType string + +// Enum values for MultiRegionKeyType +const ( + MultiRegionKeyTypePrimary MultiRegionKeyType = "PRIMARY" + MultiRegionKeyTypeReplica MultiRegionKeyType = "REPLICA" +) + +// Values returns all known values for MultiRegionKeyType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (MultiRegionKeyType) Values() []MultiRegionKeyType { + return []MultiRegionKeyType{ + "PRIMARY", + "REPLICA", + } +} + +type OriginType string + +// Enum values for OriginType +const ( + OriginTypeAwsKms OriginType = "AWS_KMS" + OriginTypeExternal OriginType = "EXTERNAL" + OriginTypeAwsCloudhsm OriginType = "AWS_CLOUDHSM" + OriginTypeExternalKeyStore OriginType = "EXTERNAL_KEY_STORE" +) + +// Values returns all known values for OriginType. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (OriginType) Values() []OriginType { + return []OriginType{ + "AWS_KMS", + "EXTERNAL", + "AWS_CLOUDHSM", + "EXTERNAL_KEY_STORE", + } +} + +type SigningAlgorithmSpec string + +// Enum values for SigningAlgorithmSpec +const ( + SigningAlgorithmSpecRsassaPssSha256 SigningAlgorithmSpec = "RSASSA_PSS_SHA_256" + SigningAlgorithmSpecRsassaPssSha384 SigningAlgorithmSpec = "RSASSA_PSS_SHA_384" + SigningAlgorithmSpecRsassaPssSha512 SigningAlgorithmSpec = "RSASSA_PSS_SHA_512" + SigningAlgorithmSpecRsassaPkcs1V15Sha256 SigningAlgorithmSpec = "RSASSA_PKCS1_V1_5_SHA_256" + SigningAlgorithmSpecRsassaPkcs1V15Sha384 SigningAlgorithmSpec = "RSASSA_PKCS1_V1_5_SHA_384" + SigningAlgorithmSpecRsassaPkcs1V15Sha512 SigningAlgorithmSpec = "RSASSA_PKCS1_V1_5_SHA_512" + SigningAlgorithmSpecEcdsaSha256 SigningAlgorithmSpec = "ECDSA_SHA_256" + SigningAlgorithmSpecEcdsaSha384 SigningAlgorithmSpec = "ECDSA_SHA_384" + SigningAlgorithmSpecEcdsaSha512 SigningAlgorithmSpec = "ECDSA_SHA_512" + SigningAlgorithmSpecSm2dsa SigningAlgorithmSpec = "SM2DSA" +) + +// Values returns all known values for SigningAlgorithmSpec. 
Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (SigningAlgorithmSpec) Values() []SigningAlgorithmSpec { + return []SigningAlgorithmSpec{ + "RSASSA_PSS_SHA_256", + "RSASSA_PSS_SHA_384", + "RSASSA_PSS_SHA_512", + "RSASSA_PKCS1_V1_5_SHA_256", + "RSASSA_PKCS1_V1_5_SHA_384", + "RSASSA_PKCS1_V1_5_SHA_512", + "ECDSA_SHA_256", + "ECDSA_SHA_384", + "ECDSA_SHA_512", + "SM2DSA", + } +} + +type WrappingKeySpec string + +// Enum values for WrappingKeySpec +const ( + WrappingKeySpecRsa2048 WrappingKeySpec = "RSA_2048" +) + +// Values returns all known values for WrappingKeySpec. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (WrappingKeySpec) Values() []WrappingKeySpec { + return []WrappingKeySpec{ + "RSA_2048", + } +} + +type XksProxyConnectivityType string + +// Enum values for XksProxyConnectivityType +const ( + XksProxyConnectivityTypePublicEndpoint XksProxyConnectivityType = "PUBLIC_ENDPOINT" + XksProxyConnectivityTypeVpcEndpointService XksProxyConnectivityType = "VPC_ENDPOINT_SERVICE" +) + +// Values returns all known values for XksProxyConnectivityType. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (XksProxyConnectivityType) Values() []XksProxyConnectivityType { + return []XksProxyConnectivityType{ + "PUBLIC_ENDPOINT", + "VPC_ENDPOINT_SERVICE", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/errors.go new file mode 100644 index 00000000000..0bfaed5cb18 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/errors.go @@ -0,0 +1,1122 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// The request was rejected because it attempted to create a resource that already +// exists. +type AlreadyExistsException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *AlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *AlreadyExistsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *AlreadyExistsException) ErrorCode() string { return "AlreadyExistsException" } +func (e *AlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified CloudHSM cluster is already +// associated with an CloudHSM key store in the account, or it shares a backup +// history with an CloudHSM key store in the account. Each CloudHSM key store in +// the account must be associated with a different CloudHSM cluster. CloudHSM +// clusters that share a backup history have the same cluster certificate. To view +// the cluster certificate of an CloudHSM cluster, use the DescribeClusters +// (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) +// operation. 
+type CloudHsmClusterInUseException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *CloudHsmClusterInUseException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *CloudHsmClusterInUseException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *CloudHsmClusterInUseException) ErrorCode() string { return "CloudHsmClusterInUseException" }
+func (e *CloudHsmClusterInUseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because the associated CloudHSM cluster did not meet
+// the configuration requirements for an CloudHSM key store.
+//
+// * The CloudHSM
+// cluster must be configured with private subnets in at least two different
+// Availability Zones in the Region.
+//
+// * The security group for the cluster
+// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html)
+// (cloudhsm-cluster-<cluster-id>-sg) must include inbound rules and outbound
+// rules that allow TCP traffic on ports 2223-2225. The Source in the inbound
+// rules and the Destination in the outbound rules must match the security group
+// ID. These rules are set by default when you create the CloudHSM cluster. Do not
+// delete or change them. To get information about a particular security group,
+// use the DescribeSecurityGroups
+// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
+// operation.
+//
+// * The CloudHSM cluster must contain at least as many HSMs as the
+// operation requires. To add HSMs, use the CloudHSM CreateHsm
+// (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
+// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey
+// operations, the CloudHSM cluster must have at least two active HSMs, each in a
+// different Availability Zone. For the ConnectCustomKeyStore operation, the
+// CloudHSM cluster must contain at least one active HSM.
+//
+// For information about the
+// requirements for an CloudHSM cluster that is associated with an CloudHSM key
+// store, see Assemble the Prerequisites
+// (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
+// in the Key Management Service Developer Guide. For information about creating a
+// private subnet for an CloudHSM cluster, see Create a Private Subnet
+// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) in
+// the CloudHSM User Guide. For information about cluster security groups, see
+// Configure a Default Security Group
+// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) in the
+// CloudHSM User Guide.
+type CloudHsmClusterInvalidConfigurationException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *CloudHsmClusterInvalidConfigurationException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *CloudHsmClusterInvalidConfigurationException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *CloudHsmClusterInvalidConfigurationException) ErrorCode() string {
+	return "CloudHsmClusterInvalidConfigurationException"
+}
+func (e *CloudHsmClusterInvalidConfigurationException) ErrorFault() smithy.ErrorFault {
+	return smithy.FaultClient
+}
+
+// The request was rejected because the CloudHSM cluster associated with the
+// CloudHSM key store is not active.
Initialize and activate the cluster and try +// the command again. For detailed instructions, see Getting Started +// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html) in +// the CloudHSM User Guide. +type CloudHsmClusterNotActiveException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *CloudHsmClusterNotActiveException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *CloudHsmClusterNotActiveException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *CloudHsmClusterNotActiveException) ErrorCode() string { + return "CloudHsmClusterNotActiveException" +} +func (e *CloudHsmClusterNotActiveException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because KMS cannot find the CloudHSM cluster with the +// specified cluster ID. Retry the request with a different cluster ID. +type CloudHsmClusterNotFoundException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *CloudHsmClusterNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *CloudHsmClusterNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *CloudHsmClusterNotFoundException) ErrorCode() string { + return "CloudHsmClusterNotFoundException" +} +func (e *CloudHsmClusterNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified CloudHSM cluster has a different +// cluster certificate than the original cluster. You cannot use the operation to +// specify an unrelated cluster for an CloudHSM key store. Specify an CloudHSM +// cluster that shares a backup history with the original cluster. This includes +// clusters that were created from a backup of the current cluster, and clusters +// that were created from the same backup that produced the current cluster. +// CloudHSM clusters that share a backup history have the same cluster certificate. +// To view the cluster certificate of an CloudHSM cluster, use the DescribeClusters +// (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) +// operation. +type CloudHsmClusterNotRelatedException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *CloudHsmClusterNotRelatedException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *CloudHsmClusterNotRelatedException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *CloudHsmClusterNotRelatedException) ErrorCode() string { + return "CloudHsmClusterNotRelatedException" +} +func (e *CloudHsmClusterNotRelatedException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The request was rejected because the custom key store contains KMS keys. After +// verifying that you do not need to use the KMS keys, use the ScheduleKeyDeletion +// operation to delete the KMS keys. After they are deleted, you can delete the +// custom key store. 
+type CustomKeyStoreHasCMKsException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *CustomKeyStoreHasCMKsException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *CustomKeyStoreHasCMKsException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *CustomKeyStoreHasCMKsException) ErrorCode() string { return "CustomKeyStoreHasCMKsException" }
+func (e *CustomKeyStoreHasCMKsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because of the ConnectionState of the custom key store.
+// To get the ConnectionState of a custom key store, use the
+// DescribeCustomKeyStores operation. This exception is thrown under the following
+// conditions:
+//
+// * You requested the ConnectCustomKeyStore operation on a custom key
+// store with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
+// for all other ConnectionState values. To reconnect a custom key store in a
+// FAILED state, disconnect it (DisconnectCustomKeyStore), then connect it
+// (ConnectCustomKeyStore).
+//
+// * You requested the CreateKey operation in a custom
+// key store that is not connected. This operation is valid only when the custom
+// key store ConnectionState is CONNECTED.
+//
+// * You requested the
+// DisconnectCustomKeyStore operation on a custom key store with a ConnectionState
+// of DISCONNECTING or DISCONNECTED. This operation is valid for all other
+// ConnectionState values.
+//
+// * You requested the UpdateCustomKeyStore or
+// DeleteCustomKeyStore operation on a custom key store that is not disconnected.
+// This operation is valid only when the custom key store ConnectionState is
+// DISCONNECTED.
+//
+// * You requested the GenerateRandom operation in an CloudHSM key
+// store that is not connected. This operation is valid only when the CloudHSM key
+// store ConnectionState is CONNECTED.
+type CustomKeyStoreInvalidStateException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *CustomKeyStoreInvalidStateException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *CustomKeyStoreInvalidStateException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *CustomKeyStoreInvalidStateException) ErrorCode() string {
+	return "CustomKeyStoreInvalidStateException"
+}
+func (e *CustomKeyStoreInvalidStateException) ErrorFault() smithy.ErrorFault {
+	return smithy.FaultClient
+}
+
+// The request was rejected because the specified custom key store name is already
+// assigned to another custom key store in the account. Try again with a custom key
+// store name that is unique in the account.
+type CustomKeyStoreNameInUseException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *CustomKeyStoreNameInUseException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *CustomKeyStoreNameInUseException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *CustomKeyStoreNameInUseException) ErrorCode() string {
+	return "CustomKeyStoreNameInUseException"
+}
+func (e *CustomKeyStoreNameInUseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because KMS cannot find a custom key store with the
+// specified key store name or ID.
+type CustomKeyStoreNotFoundException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *CustomKeyStoreNotFoundException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *CustomKeyStoreNotFoundException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *CustomKeyStoreNotFoundException) ErrorCode() string {
+	return "CustomKeyStoreNotFoundException"
+}
+func (e *CustomKeyStoreNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
+type DependencyTimeoutException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *DependencyTimeoutException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *DependencyTimeoutException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *DependencyTimeoutException) ErrorCode() string { return "DependencyTimeoutException" }
+func (e *DependencyTimeoutException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer }
+
+// The request was rejected because the specified KMS key is not enabled.
+type DisabledException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *DisabledException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *DisabledException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *DisabledException) ErrorCode() string { return "DisabledException" }
+func (e *DisabledException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because the specified import token is expired. Use
+// GetParametersForImport to get a new import token and public key, use the new
+// public key to encrypt the key material, and then try the request again.
+type ExpiredImportTokenException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *ExpiredImportTokenException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ExpiredImportTokenException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *ExpiredImportTokenException) ErrorCode() string { return "ExpiredImportTokenException" }
+func (e *ExpiredImportTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because the specified KMS key cannot decrypt the data.
+// The KeyId in a Decrypt request and the SourceKeyId in a ReEncrypt request must
+// identify the same KMS key that was used to encrypt the ciphertext.
+type IncorrectKeyException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *IncorrectKeyException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *IncorrectKeyException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *IncorrectKeyException) ErrorCode() string { return "IncorrectKeyException" }
+func (e *IncorrectKeyException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
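Both fault classes above surface through smithy-go's APIError interface, which makes a generic retry decision possible without enumerating every concrete type. A rough sketch; the retry policy itself is illustrative, not part of this patch:

package kmsutil

import (
	"errors"

	smithy "github.com/aws/smithy-go"
)

// isServerFault reports whether err is a modeled KMS error attributed to the
// service side (e.g. DependencyTimeoutException), which a caller might choose
// to retry, unlike client faults such as DisabledException.
func isServerFault(err error) bool {
	var ae smithy.APIError
	if errors.As(err, &ae) {
		return ae.ErrorFault() == smithy.FaultServer
	}
	return false // not a modeled service error
}

+// The request was rejected because the key material in the request is expired,
+// invalid, or is not the same key material that was previously imported into this
+// KMS key.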
+type IncorrectKeyMaterialException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *IncorrectKeyMaterialException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *IncorrectKeyMaterialException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *IncorrectKeyMaterialException) ErrorCode() string { return "IncorrectKeyMaterialException" } +func (e *IncorrectKeyMaterialException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the trust anchor certificate in the request to +// create an CloudHSM key store is not the trust anchor certificate for the +// specified CloudHSM cluster. When you initialize the CloudHSM cluster +// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr), +// you create the trust anchor certificate and save it in the customerCA.crt file. +type IncorrectTrustAnchorException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *IncorrectTrustAnchorException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *IncorrectTrustAnchorException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *IncorrectTrustAnchorException) ErrorCode() string { return "IncorrectTrustAnchorException" } +func (e *IncorrectTrustAnchorException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified alias name is not valid. +type InvalidAliasNameException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidAliasNameException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidAliasNameException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidAliasNameException) ErrorCode() string { return "InvalidAliasNameException" } +func (e *InvalidAliasNameException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because a specified ARN, or an ARN in a key policy, is +// not valid. +type InvalidArnException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidArnException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidArnException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidArnException) ErrorCode() string { return "InvalidArnException" } +func (e *InvalidArnException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// From the Decrypt or ReEncrypt operation, the request was rejected because the +// specified ciphertext, or additional authenticated data incorporated into the +// ciphertext, such as the encryption context, is corrupted, missing, or otherwise +// invalid. From the ImportKeyMaterial operation, the request was rejected because +// KMS could not decrypt the encrypted (wrapped) key material. 
+type InvalidCiphertextException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidCiphertextException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidCiphertextException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidCiphertextException) ErrorCode() string { return "InvalidCiphertextException" } +func (e *InvalidCiphertextException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified GrantId is not valid. +type InvalidGrantIdException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidGrantIdException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidGrantIdException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidGrantIdException) ErrorCode() string { return "InvalidGrantIdException" } +func (e *InvalidGrantIdException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified grant token is not valid. +type InvalidGrantTokenException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidGrantTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidGrantTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidGrantTokenException) ErrorCode() string { return "InvalidGrantTokenException" } +func (e *InvalidGrantTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the provided import token is invalid or is +// associated with a different KMS key. +type InvalidImportTokenException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidImportTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidImportTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidImportTokenException) ErrorCode() string { return "InvalidImportTokenException" } +func (e *InvalidImportTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected for one of the following reasons: +// +// * The KeyUsage value +// of the KMS key is incompatible with the API operation. +// +// * The encryption +// algorithm or signing algorithm specified for the operation is incompatible with +// the type of key material in the KMS key (KeySpec). +// +// For encrypting, decrypting, +// re-encrypting, and generating data keys, the KeyUsage must be ENCRYPT_DECRYPT. +// For signing and verifying messages, the KeyUsage must be SIGN_VERIFY. For +// generating and verifying message authentication codes (MACs), the KeyUsage must +// be GENERATE_VERIFY_MAC. To find the KeyUsage of a KMS key, use the DescribeKey +// operation. To find the encryption or signing algorithms supported for a +// particular KMS key, use the DescribeKey operation. 
+type InvalidKeyUsageException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *InvalidKeyUsageException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidKeyUsageException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *InvalidKeyUsageException) ErrorCode() string { return "InvalidKeyUsageException" }
+func (e *InvalidKeyUsageException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because the marker that specifies where pagination
+// should next begin is not valid.
+type InvalidMarkerException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *InvalidMarkerException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidMarkerException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *InvalidMarkerException) ErrorCode() string { return "InvalidMarkerException" }
+func (e *InvalidMarkerException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because the specified KMS key was not available. You
+// can retry the request.
+type KeyUnavailableException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *KeyUnavailableException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *KeyUnavailableException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *KeyUnavailableException) ErrorCode() string { return "KeyUnavailableException" }
+func (e *KeyUnavailableException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer }
+
+// The request was rejected because an internal exception occurred. The request can
+// be retried.
+type KMSInternalException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *KMSInternalException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *KMSInternalException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *KMSInternalException) ErrorCode() string { return "KMSInternalException" }
+func (e *KMSInternalException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer }
+
+// The request was rejected because the HMAC verification failed. HMAC verification
+// fails when the HMAC computed by using the specified message, HMAC KMS key, and
+// MAC algorithm does not match the HMAC specified in the request.
+type KMSInvalidMacException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *KMSInvalidMacException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *KMSInvalidMacException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *KMSInvalidMacException) ErrorCode() string { return "KMSInvalidMacException" }
+func (e *KMSInvalidMacException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
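Concrete types like the one documented next unwrap with the standard errors package. A short sketch distinguishing an invalid signature from every other Verify failure; client construction and request fields are assumed, not shown in this patch:

package kmsutil

import (
	"context"
	"errors"

	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// verify returns (false, nil) when KMS rejects the signature itself, and a
// non-nil error for every other failure (throttling, key state, transport).
func verify(ctx context.Context, client *kms.Client, in *kms.VerifyInput) (bool, error) {
	out, err := client.Verify(ctx, in)
	if err != nil {
		var invalid *types.KMSInvalidSignatureException
		if errors.As(err, &invalid) {
			return false, nil // signature did not match message, key, or algorithm
		}
		return false, err
	}
	return out.SignatureValid, nil
}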
+// The request was rejected because the signature verification failed. Signature
+// verification fails when it cannot confirm that the signature was produced by
+// signing the specified message with the specified KMS key and signing algorithm.
+type KMSInvalidSignatureException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *KMSInvalidSignatureException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *KMSInvalidSignatureException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *KMSInvalidSignatureException) ErrorCode() string { return "KMSInvalidSignatureException" }
+func (e *KMSInvalidSignatureException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because the state of the specified resource is not
+// valid for this request. This exception means one of the following:
+//
+// * The key
+// state of the KMS key is not compatible with the operation. To find the key
+// state, use the DescribeKey operation. For more information about which key
+// states are compatible with each KMS operation, see Key states of KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the
+// Key Management Service Developer Guide.
+//
+// * For cryptographic operations on KMS
+// keys in custom key stores, this exception represents a general failure with many
+// possible causes. To identify the cause, see the error message that accompanies
+// the exception.
+type KMSInvalidStateException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *KMSInvalidStateException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *KMSInvalidStateException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *KMSInvalidStateException) ErrorCode() string { return "KMSInvalidStateException" }
+func (e *KMSInvalidStateException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because a quota was exceeded. For more information, see
+// Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) in
+// the Key Management Service Developer Guide.
+type LimitExceededException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *LimitExceededException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *LimitExceededException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *LimitExceededException) ErrorCode() string { return "LimitExceededException" }
+func (e *LimitExceededException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because the specified policy is not syntactically or
+// semantically correct.
+type MalformedPolicyDocumentException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *MalformedPolicyDocumentException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *MalformedPolicyDocumentException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *MalformedPolicyDocumentException) ErrorCode() string {
+	return "MalformedPolicyDocumentException"
+}
+func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because the specified entity or resource could not be
+// found.
+type NotFoundException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *NotFoundException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *NotFoundException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *NotFoundException) ErrorCode() string { return "NotFoundException" }
+func (e *NotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because one or more tags are not valid.
+type TagException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *TagException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *TagException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *TagException) ErrorCode() string { return "TagException" }
+func (e *TagException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because a specified parameter is not supported or a
+// specified resource is not valid for this operation.
+type UnsupportedOperationException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *UnsupportedOperationException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *UnsupportedOperationException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *UnsupportedOperationException) ErrorCode() string { return "UnsupportedOperationException" }
+func (e *UnsupportedOperationException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because the XksKeyId is already associated with a KMS
+// key in this external key store. Each KMS key in an external key store must be
+// associated with a different external key.
+type XksKeyAlreadyInUseException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *XksKeyAlreadyInUseException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *XksKeyAlreadyInUseException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *XksKeyAlreadyInUseException) ErrorCode() string { return "XksKeyAlreadyInUseException" }
+func (e *XksKeyAlreadyInUseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because the external key specified by the XksKeyId
+// parameter did not meet the configuration requirements for an external key store.
+// The external key must be an AES-256 symmetric key that is enabled and performs
+// encryption and decryption.
+type XksKeyInvalidConfigurationException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *XksKeyInvalidConfigurationException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *XksKeyInvalidConfigurationException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *XksKeyInvalidConfigurationException) ErrorCode() string {
+	return "XksKeyInvalidConfigurationException"
+}
+func (e *XksKeyInvalidConfigurationException) ErrorFault() smithy.ErrorFault {
+	return smithy.FaultClient
+}
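The same unwrapping pattern applies to lookups; a sketch that treats the generic NotFoundException above as a definitive "no such key" answer (the helper name and client setup are assumptions):

package kmsutil

import (
	"context"
	"errors"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// keyExists distinguishes "the key does not exist" from transient failures.
func keyExists(ctx context.Context, client *kms.Client, keyID string) (bool, error) {
	_, err := client.DescribeKey(ctx, &kms.DescribeKeyInput{KeyId: aws.String(keyID)})
	var nf *types.NotFoundException
	if errors.As(err, &nf) {
		return false, nil // the key ID or ARN resolved to nothing
	}
	if err != nil {
		return false, err // any other modeled or transport error
	}
	return true, nil
}

+// The request was rejected because the external key store proxy could not find the
+// external key.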
This exception is thrown when the value of the XksKeyId parameter +// doesn't identify a key in the external key manager associated with the external +// key proxy. Verify that the XksKeyId represents an existing key in the external +// key manager. Use the key identifier that the external key store proxy uses to +// identify the key. For details, see the documentation provided with your external +// key store proxy or key manager. +type XksKeyNotFoundException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *XksKeyNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *XksKeyNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *XksKeyNotFoundException) ErrorCode() string { return "XksKeyNotFoundException" } +func (e *XksKeyNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the proxy credentials failed to authenticate to +// the specified external key store proxy. The specified external key store proxy +// rejected a status request from KMS due to invalid credentials. This can indicate +// an error in the credentials or in the identification of the external key store +// proxy. +type XksProxyIncorrectAuthenticationCredentialException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *XksProxyIncorrectAuthenticationCredentialException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *XksProxyIncorrectAuthenticationCredentialException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *XksProxyIncorrectAuthenticationCredentialException) ErrorCode() string { + return "XksProxyIncorrectAuthenticationCredentialException" +} +func (e *XksProxyIncorrectAuthenticationCredentialException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The request was rejected because the Amazon VPC endpoint service configuration +// does not fulfill the requirements for an external key store proxy. For details, +// see the exception message. +type XksProxyInvalidConfigurationException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *XksProxyInvalidConfigurationException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *XksProxyInvalidConfigurationException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *XksProxyInvalidConfigurationException) ErrorCode() string { + return "XksProxyInvalidConfigurationException" +} +func (e *XksProxyInvalidConfigurationException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// KMS cannot interpret the response it received from the external key store proxy. +// The problem might be a poorly constructed response, but it could also be a +// transient network issue. If you see this error repeatedly, report it to the +// proxy vendor. 
+type XksProxyInvalidResponseException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *XksProxyInvalidResponseException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *XksProxyInvalidResponseException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *XksProxyInvalidResponseException) ErrorCode() string {
+	return "XksProxyInvalidResponseException"
+}
+func (e *XksProxyInvalidResponseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because the XksProxyUriEndpoint is already associated
+// with an external key store in the Amazon Web Services account and Region. Each
+// external key store in an account and Region must use a unique external key
+// store proxy address.
+type XksProxyUriEndpointInUseException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *XksProxyUriEndpointInUseException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *XksProxyUriEndpointInUseException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *XksProxyUriEndpointInUseException) ErrorCode() string {
+	return "XksProxyUriEndpointInUseException"
+}
+func (e *XksProxyUriEndpointInUseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because the concatenation of the XksProxyUriEndpoint
+// and XksProxyUriPath is already associated with an external key store in the
+// Amazon Web Services account and Region. Each external key store in an account
+// and Region must use a unique external key store proxy API address.
+type XksProxyUriInUseException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *XksProxyUriInUseException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *XksProxyUriInUseException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *XksProxyUriInUseException) ErrorCode() string { return "XksProxyUriInUseException" }
+func (e *XksProxyUriInUseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// KMS was unable to reach the specified XksProxyUriPath. The path must be
+// reachable before you create the external key store or update its settings. This
+// exception is also thrown when the external key store proxy response to a
+// GetHealthStatus request indicates that all external key manager instances are
+// unavailable.
+type XksProxyUriUnreachableException struct {
+	Message *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *XksProxyUriUnreachableException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *XksProxyUriUnreachableException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *XksProxyUriUnreachableException) ErrorCode() string {
+	return "XksProxyUriUnreachableException"
+}
+func (e *XksProxyUriUnreachableException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because the specified Amazon VPC endpoint service is
+// already associated with an external key store in the Amazon Web Services account
+// and Region. Each external key store in an Amazon Web Services account and Region
+// must use a different Amazon VPC endpoint service.
+type XksProxyVpcEndpointServiceInUseException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *XksProxyVpcEndpointServiceInUseException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *XksProxyVpcEndpointServiceInUseException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *XksProxyVpcEndpointServiceInUseException) ErrorCode() string { + return "XksProxyVpcEndpointServiceInUseException" +} +func (e *XksProxyVpcEndpointServiceInUseException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The request was rejected because the Amazon VPC endpoint service configuration +// does not fulfill the requirements for an external key store proxy. For details, +// see the exception message and review the requirements for Amazon VPC endpoint +// service connectivity for an external key store. +type XksProxyVpcEndpointServiceInvalidConfigurationException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *XksProxyVpcEndpointServiceInvalidConfigurationException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *XksProxyVpcEndpointServiceInvalidConfigurationException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *XksProxyVpcEndpointServiceInvalidConfigurationException) ErrorCode() string { + return "XksProxyVpcEndpointServiceInvalidConfigurationException" +} +func (e *XksProxyVpcEndpointServiceInvalidConfigurationException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The request was rejected because KMS could not find the specified VPC endpoint +// service. Use DescribeCustomKeyStores to verify the VPC endpoint service name for +// the external key store. Also, confirm that the Allow principals list for the VPC +// endpoint service includes the KMS service principal for the Region, such as +// cks.kms.us-east-1.amazonaws.com. +type XksProxyVpcEndpointServiceNotFoundException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *XksProxyVpcEndpointServiceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *XksProxyVpcEndpointServiceNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *XksProxyVpcEndpointServiceNotFoundException) ErrorCode() string { + return "XksProxyVpcEndpointServiceNotFoundException" +} +func (e *XksProxyVpcEndpointServiceNotFoundException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/types.go new file mode 100644 index 00000000000..4d971e912fe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/types.go @@ -0,0 +1,630 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" + "time" +) + +// Contains information about an alias. +type AliasListEntry struct { + + // String that contains the key ARN. + AliasArn *string + + // String that contains the alias. This value begins with alias/. + AliasName *string + + // Date and time that the alias was most recently created in the account and + // Region. Formatted as Unix time. 
+ CreationDate *time.Time + + // Date and time that the alias was most recently associated with a KMS key in the + // account and Region. Formatted as Unix time. + LastUpdatedDate *time.Time + + // String that contains the key identifier of the KMS key associated with the + // alias. + TargetKeyId *string + + noSmithyDocumentSerde +} + +// Contains information about each custom key store in the custom key store list. +type CustomKeyStoresListEntry struct { + + // A unique identifier for the CloudHSM cluster that is associated with an CloudHSM + // key store. This field appears only when the CustomKeyStoreType is AWS_CLOUDHSM. + CloudHsmClusterId *string + + // Describes the connection error. This field appears in the response only when the + // ConnectionState is FAILED. Many failures can be resolved by updating the + // properties of the custom key store. To update a custom key store, disconnect it + // (DisconnectCustomKeyStore), correct the errors (UpdateCustomKeyStore), and try + // to connect again (ConnectCustomKeyStore). For additional help resolving these + // errors, see How to Fix a Connection Failure + // (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed) + // in Key Management Service Developer Guide. All custom key stores: + // + // * + // INTERNAL_ERROR — KMS could not complete the request due to an internal error. + // Retry the request. For ConnectCustomKeyStore requests, disconnect the custom key + // store before trying to connect again. + // + // * NETWORK_ERRORS — Network errors are + // preventing KMS from connecting the custom key store to its backing key + // store. + // + // CloudHSM key stores: + // + // * CLUSTER_NOT_FOUND — KMS cannot find the CloudHSM + // cluster with the specified cluster ID. + // + // * INSUFFICIENT_CLOUDHSM_HSMS — The + // associated CloudHSM cluster does not contain any active HSMs. To connect a + // custom key store to its CloudHSM cluster, the cluster must contain at least one + // active HSM. + // + // * INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET — At least one private + // subnet associated with the CloudHSM cluster doesn't have any available IP + // addresses. A CloudHSM key store connection requires one free IP address in each + // of the associated private subnets, although two are preferable. For details, see + // How to Fix a Connection Failure + // (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed) + // in the Key Management Service Developer Guide. + // + // * INVALID_CREDENTIALS — The + // KeyStorePassword for the custom key store doesn't match the current password of + // the kmsuser crypto user in the CloudHSM cluster. Before you can connect your + // custom key store to its CloudHSM cluster, you must change the kmsuser account + // password and update the KeyStorePassword value for the custom key store. + // + // * + // SUBNET_NOT_FOUND — A subnet in the CloudHSM cluster configuration was deleted. + // If KMS cannot find all of the subnets in the cluster configuration, attempts to + // connect the custom key store to the CloudHSM cluster fail. To fix this error, + // create a cluster from a recent backup and associate it with your custom key + // store. (This process creates a new cluster configuration with a VPC and private + // subnets.) For details, see How to Fix a Connection Failure + // (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed) + // in the Key Management Service Developer Guide. 
+	//
+	// * USER_LOCKED_OUT — The kmsuser
+	// CU account is locked out of the associated CloudHSM cluster due to too many
+	// failed password attempts. Before you can connect your custom key store to its
+	// CloudHSM cluster, you must change the kmsuser account password and update the
+	// key store password value for the custom key store.
+	//
+	// * USER_LOGGED_IN — The
+	// kmsuser CU account is logged into the associated CloudHSM cluster. This prevents
+	// KMS from rotating the kmsuser account password and logging into the cluster.
+	// Before you can connect your custom key store to its CloudHSM cluster, you must
+	// log the kmsuser CU out of the cluster. If you changed the kmsuser password to
+	// log into the cluster, you must also update the key store password value for
+	// the custom key store. For help, see How to Log Out and Reconnect
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#login-kmsuser-2)
+	// in the Key Management Service Developer Guide.
+	//
+	// * USER_NOT_FOUND — KMS cannot
+	// find a kmsuser CU account in the associated CloudHSM cluster. Before you can
+	// connect your custom key store to its CloudHSM cluster, you must create a kmsuser
+	// CU account in the cluster, and then update the key store password value for the
+	// custom key store.
+	//
+	// External key stores:
+	//
+	// * INVALID_CREDENTIALS — One or both of
+	// the XksProxyAuthenticationCredential values are not valid on the specified
+	// external key store proxy.
+	//
+	// * XKS_PROXY_ACCESS_DENIED — KMS requests are denied
+	// access to the external key store proxy. If the external key store proxy has
+	// authorization rules, verify that they permit KMS to communicate with the proxy
+	// on your behalf.
+	//
+	// * XKS_PROXY_INVALID_CONFIGURATION — A configuration error is
+	// preventing the external key store from connecting to its proxy. Verify the value
+	// of the XksProxyUriPath.
+	//
+	// * XKS_PROXY_INVALID_RESPONSE — KMS cannot interpret the
+	// response from the external key store proxy. If you see this connection error
+	// code repeatedly, notify your external key store proxy vendor.
+	//
+	// *
+	// XKS_PROXY_INVALID_TLS_CONFIGURATION — KMS cannot connect to the external key
+	// store proxy because the TLS configuration is invalid. Verify that the XKS proxy
+	// supports TLS 1.2 or 1.3. Also, verify that the TLS certificate is not expired,
+	// that it matches the hostname in the XksProxyUriEndpoint value, and that it
+	// is signed by a certificate authority included in the Trusted Certificate
+	// Authorities
+	// (https://github.com/aws/aws-kms-xksproxy-api-spec/blob/main/TrustedCertificateAuthorities)
+	// list.
+	//
+	// * XKS_PROXY_NOT_REACHABLE — KMS can't communicate with your external key
+	// store proxy. Verify that the XksProxyUriEndpoint and XksProxyUriPath are
+	// correct. Use the tools for your external key store proxy to verify that the
+	// proxy is active and available on its network. Also, verify that your external
+	// key manager instances are operating properly. Connection attempts fail with this
+	// connection error code if the proxy reports that all external key manager
+	// instances are unavailable.
+	//
+	// * XKS_PROXY_TIMED_OUT — KMS can connect to the
+	// external key store proxy, but the proxy does not respond to KMS in the time
+	// allotted. If you see this connection error code repeatedly, notify your external
+	// key store proxy vendor.
+	//
+	// * XKS_VPC_ENDPOINT_SERVICE_INVALID_CONFIGURATION — The
+	// Amazon VPC endpoint service configuration doesn't conform to the requirements
+	// for a KMS external key store.
+	//
+	// * The VPC endpoint service must be an endpoint
+	// service for interface endpoints in the caller's Amazon Web Services account.
+	//
+	// *
+	// It must have a network load balancer (NLB) connected to at least two subnets,
+	// each in a different Availability Zone.
+	//
+	// * The Allow principals list must include
+	// the KMS service principal for the Region, cks.kms.<Region>.amazonaws.com,
+	// such as cks.kms.us-east-1.amazonaws.com.
+	//
+	// * It must not require acceptance
+	// (https://docs.aws.amazon.com/vpc/latest/privatelink/create-endpoint-service.html)
+	// of connection requests.
+	//
+	// * It must have a private DNS name. The private DNS name
+	// for an external key store with VPC_ENDPOINT_SERVICE connectivity must be unique
+	// in its Amazon Web Services Region.
+	//
+	// * The domain of the private DNS name must
+	// have a verification status
+	// (https://docs.aws.amazon.com/vpc/latest/privatelink/verify-domains.html) of
+	// verified.
+	//
+	// * The TLS certificate
+	// (https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html)
+	// specifies the private DNS hostname at which the endpoint is reachable.
+	//
+	// *
+	// XKS_VPC_ENDPOINT_SERVICE_NOT_FOUND — KMS can't find the VPC endpoint service
+	// that it uses to communicate with the external key store proxy. Verify that the
+	// XksProxyVpcEndpointServiceName is correct and the KMS service principal has
+	// service consumer permissions on the Amazon VPC endpoint service.
+	ConnectionErrorCode ConnectionErrorCodeType
+
+	// Indicates whether the custom key store is connected to its backing key store.
+	// For a CloudHSM key store, the ConnectionState indicates whether it is connected
+	// to its CloudHSM cluster. For an external key store, the ConnectionState
+	// indicates whether it is connected to the external key store proxy that
+	// communicates with your external key manager. You can create and use KMS keys in
+	// your custom key store only when its ConnectionState is CONNECTED. The
+	// ConnectionState value is DISCONNECTED only if the key store has never been
+	// connected or you use the DisconnectCustomKeyStore operation to disconnect it. If
+	// the value is CONNECTED but you are having trouble using the custom key store,
+	// make sure that the backing key store is reachable and active. For a CloudHSM
+	// key store, verify that its associated CloudHSM cluster is active and contains at
+	// least one active HSM. For an external key store, verify that the external key
+	// store proxy and external key manager are connected and enabled. A value of
+	// FAILED indicates that an attempt to connect was unsuccessful. The
+	// ConnectionErrorCode field in the response indicates the cause of the failure.
+	// For help resolving a connection failure, see Troubleshooting a custom key store
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html) in the
+	// Key Management Service Developer Guide.
+	ConnectionState ConnectionStateType
+
+	// The date and time when the custom key store was created.
+	CreationDate *time.Time
+
+	// A unique identifier for the custom key store.
+	CustomKeyStoreId *string
+
+	// The user-specified friendly name for the custom key store.
+	CustomKeyStoreName *string
+
+	// Indicates the type of the custom key store.
+	// AWS_CLOUDHSM indicates a custom key
+	// store backed by a CloudHSM cluster. EXTERNAL_KEY_STORE indicates a custom key
+	// store backed by an external key store proxy and external key manager outside of
+	// Amazon Web Services.
+	CustomKeyStoreType CustomKeyStoreType
+
+	// The trust anchor certificate of the CloudHSM cluster associated with a CloudHSM
+	// key store. When you initialize the cluster
+	// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr),
+	// you create this certificate and save it in the customerCA.crt file. This field
+	// appears only when the CustomKeyStoreType is AWS_CLOUDHSM.
+	TrustAnchorCertificate *string
+
+	// Configuration settings for the external key store proxy (XKS proxy). The
+	// external key store proxy translates KMS requests into a format that your
+	// external key manager can understand. The proxy configuration includes connection
+	// information that KMS requires. This field appears only when the
+	// CustomKeyStoreType is EXTERNAL_KEY_STORE.
+	XksProxyConfiguration *XksProxyConfigurationType
+
+	noSmithyDocumentSerde
+}
+
+// Use this structure to allow cryptographic operations
+// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations)
+// in the grant only when the operation request includes the specified encryption
+// context
+// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context).
+// KMS applies the grant constraints only to cryptographic operations that support
+// an encryption context, that is, all cryptographic operations with a symmetric
+// KMS key
+// (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#symmetric-cmks).
+// Grant constraints are not applied to operations that do not support an
+// encryption context, such as cryptographic operations with asymmetric KMS keys
+// and management operations, such as DescribeKey or RetireGrant. In a
+// cryptographic operation, the encryption context in the decryption operation must
+// be an exact, case-sensitive match for the keys and values in the encryption
+// context of the encryption operation. Only the order of the pairs can vary.
+// However, in a grant constraint, the key in each key-value pair is not case
+// sensitive, but the value is case sensitive. To avoid confusion, do not use
+// multiple encryption context pairs that differ only by case. To require a fully
+// case-sensitive encryption context, use the kms:EncryptionContext: and
+// kms:EncryptionContextKeys conditions in an IAM or key policy. For details, see
+// kms:EncryptionContext:
+// (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-context)
+// in the Key Management Service Developer Guide.
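+//
+// For illustration (an editorial example, not part of the generated AWS
+// documentation): a grant whose constraint is EncryptionContextSubset =
+// {"Dept": "IT"} allows an Encrypt request with encryption context
+// {"Dept": "IT", "Purpose": "Test"}, because the request context includes the
+// constraint pair; a grant whose constraint is EncryptionContextEquals =
+// {"Dept": "IT"} does not allow that request, because the request context
+// carries an additional pair.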
+type GrantConstraints struct {
+
+	// A list of key-value pairs that must match the encryption context in the
+	// cryptographic operation
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations)
+	// request. The grant allows the operation only when the encryption context in the
+	// request is the same as the encryption context specified in this constraint.
+	EncryptionContextEquals map[string]string
+
+	// A list of key-value pairs that must be included in the encryption context of the
+	// cryptographic operation
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations)
+	// request. The grant allows the cryptographic operation only when the encryption
+	// context in the request includes the key-value pairs specified in this
+	// constraint, although it can include additional key-value pairs.
+	EncryptionContextSubset map[string]string
+
+	noSmithyDocumentSerde
+}
+
+// Contains information about a grant.
+type GrantListEntry struct {
+
+	// A list of key-value pairs that must be present in the encryption context of
+	// certain subsequent operations that the grant allows.
+	Constraints *GrantConstraints
+
+	// The date and time when the grant was created.
+	CreationDate *time.Time
+
+	// The unique identifier for the grant.
+	GrantId *string
+
+	// The identity that gets the permissions in the grant. The GranteePrincipal field
+	// in the ListGrants response usually contains the user or role designated as the
+	// grantee principal in the grant. However, when the grantee principal in the grant
+	// is an Amazon Web Services service, the GranteePrincipal field contains the
+	// service principal
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services),
+	// which might represent several different grantee principals.
+	GranteePrincipal *string
+
+	// The Amazon Web Services account under which the grant was issued.
+	IssuingAccount *string
+
+	// The unique identifier for the KMS key to which the grant applies.
+	KeyId *string
+
+	// The friendly name that identifies the grant. If a name was provided in the
+	// CreateGrant request, that name is returned. Otherwise this value is null.
+	Name *string
+
+	// The list of operations permitted by the grant.
+	Operations []GrantOperation
+
+	// The principal that can retire the grant.
+	RetiringPrincipal *string
+
+	noSmithyDocumentSerde
+}
+
+// Contains information about each entry in the key list.
+type KeyListEntry struct {
+
+	// ARN of the key.
+	KeyArn *string
+
+	// Unique identifier of the key.
+	KeyId *string
+
+	noSmithyDocumentSerde
+}
+
+// Contains metadata about a KMS key. This data type is used as a response element
+// for the CreateKey, DescribeKey, and ReplicateKey operations.
+type KeyMetadata struct {
+
+	// The globally unique identifier for the KMS key.
+	//
+	// This member is required.
+	KeyId *string
+
+	// The twelve-digit account ID of the Amazon Web Services account that owns the KMS
+	// key.
+	AWSAccountId *string
+
+	// The Amazon Resource Name (ARN) of the KMS key. For examples, see Key Management
+	// Service (KMS)
+	// (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kms)
+	// in the Example ARNs section of the Amazon Web Services General Reference.
+	Arn *string
+
+	// The cluster ID of the CloudHSM cluster that contains the key material for the
+	// KMS key. When you create a KMS key in a CloudHSM custom key store
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html),
+	// KMS creates the key material for the KMS key in the associated CloudHSM cluster.
+	// This field is present only when the KMS key is created in a CloudHSM key store.
+	CloudHsmClusterId *string
+
+	// The date and time when the KMS key was created.
+	CreationDate *time.Time
+
+	// A unique identifier for the custom key store
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+	// that contains the KMS key. This field is present only when the KMS key is
+	// created in a custom key store.
+	CustomKeyStoreId *string
+
+	// Instead, use the KeySpec field. The KeySpec and CustomerMasterKeySpec fields
+	// have the same value. We recommend that you use the KeySpec field in your code.
+	// However, to avoid breaking changes, KMS supports both fields.
+	//
+	// Deprecated: This field has been deprecated. Instead, use the KeySpec field.
+	CustomerMasterKeySpec CustomerMasterKeySpec
+
+	// The date and time after which KMS deletes this KMS key. This value is present
+	// only when the KMS key is scheduled for deletion, that is, when its KeyState is
+	// PendingDeletion. When the primary key in a multi-Region key is scheduled for
+	// deletion but still has replica keys, its key state is PendingReplicaDeletion and
+	// the length of its waiting period is displayed in the PendingDeletionWindowInDays
+	// field.
+	DeletionDate *time.Time
+
+	// The description of the KMS key.
+	Description *string
+
+	// Specifies whether the KMS key is enabled. When KeyState is Enabled this value is
+	// true, otherwise it is false.
+	Enabled bool
+
+	// The encryption algorithms that the KMS key supports. You cannot use the KMS key
+	// with other encryption algorithms within KMS. This value is present only when the
+	// KeyUsage of the KMS key is ENCRYPT_DECRYPT.
+	EncryptionAlgorithms []EncryptionAlgorithmSpec
+
+	// Specifies whether the KMS key's key material expires. This value is present only
+	// when Origin is EXTERNAL, otherwise this value is omitted.
+	ExpirationModel ExpirationModelType
+
+	// The manager of the KMS key. KMS keys in your Amazon Web Services account are
+	// either customer managed or Amazon Web Services managed. For more information
+	// about the difference, see KMS keys
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms_keys)
+	// in the Key Management Service Developer Guide.
+	KeyManager KeyManagerType
+
+	// Describes the type of key material in the KMS key.
+	KeySpec KeySpec
+
+	// The current status of the KMS key. For more information about how key state
+	// affects the use of a KMS key, see Key states of KMS keys
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the
+	// Key Management Service Developer Guide.
+	KeyState KeyState
+
+	// The cryptographic operations
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations)
+	// for which you can use the KMS key.
+	KeyUsage KeyUsageType
+
+	// The message authentication code (MAC) algorithm that the HMAC KMS key supports.
+	// This value is present only when the KeyUsage of the KMS key is
+	// GENERATE_VERIFY_MAC.
+	MacAlgorithms []MacAlgorithmSpec
+
+	// Indicates whether the KMS key is a multi-Region (True) or regional (False) key.
+	// This value is True for multi-Region primary and replica keys and False for
+	// regional KMS keys. For more information about multi-Region keys, see
+	// Multi-Region keys in KMS
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html)
+	// in the Key Management Service Developer Guide.
+	MultiRegion *bool
+
+	// Lists the primary and replica keys in the same multi-Region key. This field is
+	// present only when the value of the MultiRegion field is True. For more
+	// information about any listed KMS key, use the DescribeKey operation.
+	//
+	// *
+	// MultiRegionKeyType indicates whether the KMS key is a PRIMARY or REPLICA key.
+	//
+	// *
+	// PrimaryKey displays the key ARN and Region of the primary key.
+	// This field displays the current KMS key if it is the primary key.
+	//
+	// * ReplicaKeys displays
+	// the key ARNs and Regions of all replica keys. This field includes the current
+	// KMS key if it is a replica key.
+	MultiRegionConfiguration *MultiRegionConfiguration
+
+	// The source of the key material for the KMS key. When this value is AWS_KMS, KMS
+	// created the key material. When this value is EXTERNAL, the key material was
+	// imported or the KMS key doesn't have any key material. When this value is
+	// AWS_CLOUDHSM, the key material was created in the CloudHSM cluster associated
+	// with a custom key store.
+	Origin OriginType
+
+	// The waiting period before the primary key in a multi-Region key is deleted. This
+	// waiting period begins when the last of its replica keys is deleted. This value
+	// is present only when the KeyState of the KMS key is PendingReplicaDeletion. That
+	// indicates that the KMS key is the primary key in a multi-Region key, it is
+	// scheduled for deletion, and it still has existing replica keys. When a
+	// single-Region KMS key or a multi-Region replica key is scheduled for deletion,
+	// its deletion date is displayed in the DeletionDate field. However, when the
+	// primary key in a multi-Region key is scheduled for deletion, its waiting period
+	// doesn't begin until all of its replica keys are deleted. This value displays
+	// that waiting period. When the last replica key in the multi-Region key is
+	// deleted, the KeyState of the scheduled primary key changes from
+	// PendingReplicaDeletion to PendingDeletion and the deletion date appears in the
+	// DeletionDate field.
+	PendingDeletionWindowInDays *int32
+
+	// The signing algorithms that the KMS key supports. You cannot use the KMS key
+	// with other signing algorithms within KMS. This field appears only when the
+	// KeyUsage of the KMS key is SIGN_VERIFY.
+	SigningAlgorithms []SigningAlgorithmSpec
+
+	// The time at which the imported key material expires. When the key material
+	// expires, KMS deletes the key material and the KMS key becomes unusable. This
+	// value is present only for KMS keys whose Origin is EXTERNAL and whose
+	// ExpirationModel is KEY_MATERIAL_EXPIRES, otherwise this value is omitted.
+	ValidTo *time.Time
+
+	// Information about the external key that is associated with a KMS key in an
+	// external key store. For more information, see External key
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key)
+	// in the Key Management Service Developer Guide.
+	XksKeyConfiguration *XksKeyConfigurationType
+
+	noSmithyDocumentSerde
+}
+
+// Describes the configuration of this multi-Region key. This field appears only
+// when the KMS key is a primary or replica of a multi-Region key. For more
+// information about any listed KMS key, use the DescribeKey operation.
+type MultiRegionConfiguration struct {
+
+	// Indicates whether the KMS key is a PRIMARY or REPLICA key.
+	MultiRegionKeyType MultiRegionKeyType
+
+	// Displays the key ARN and Region of the primary key. This field includes the
+	// current KMS key if it is the primary key.
+	PrimaryKey *MultiRegionKey
+
+	// Displays the key ARNs and Regions of all replica keys. This field includes the
+	// current KMS key if it is a replica key.
+	ReplicaKeys []MultiRegionKey
+
+	noSmithyDocumentSerde
+}
+
+// Describes the primary or replica key in a multi-Region key.
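+// (Editorial note, not part of the generated AWS documentation: the key ID of a
+// multi-Region key begins with mrk-, so a primary or replica key ARN looks like
+// arn:aws:kms:us-east-1:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab.)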
+type MultiRegionKey struct {
+
+	// Displays the key ARN of a primary or replica key of a multi-Region key.
+	Arn *string
+
+	// Displays the Amazon Web Services Region of a primary or replica key in a
+	// multi-Region key.
+	Region *string
+
+	noSmithyDocumentSerde
+}
+
+// A key-value pair. A tag consists of a tag key and a tag value. Tag keys and tag
+// values are both required, but tag values can be empty (null) strings. For
+// information about the rules that apply to tag keys and tag values, see
+// User-Defined Tag Restrictions
+// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html)
+// in the Amazon Web Services Billing and Cost Management User Guide.
+type Tag struct {
+
+	// The key of the tag.
+	//
+	// This member is required.
+	TagKey *string
+
+	// The value of the tag.
+	//
+	// This member is required.
+	TagValue *string
+
+	noSmithyDocumentSerde
+}
+
+// Information about the external key
+// (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key)
+// that is associated with a KMS key in an external key store. This element appears in a
+// CreateKey or DescribeKey response only for a KMS key in an external key store.
+// The external key is a symmetric encryption key that is hosted by an external key
+// manager outside of Amazon Web Services. When you use the KMS key in an external
+// key store in a cryptographic operation, the cryptographic operation is performed
+// in the external key manager using the specified external key. For more
+// information, see External key
+// (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key)
+// in the Key Management Service Developer Guide.
+type XksKeyConfigurationType struct {
+
+	// The ID of the external key in its external key manager. This is the ID that the
+	// external key store proxy uses to identify the external key.
+	Id *string
+
+	noSmithyDocumentSerde
+}
+
+// KMS uses the authentication credential to sign requests that it sends to the
+// external key store proxy (XKS proxy) on your behalf. You establish these
+// credentials on your external key store proxy and report them to KMS. The
+// XksProxyAuthenticationCredential includes two required elements.
+type XksProxyAuthenticationCredentialType struct {
+
+	// A unique identifier for the raw secret access key.
+	//
+	// This member is required.
+	AccessKeyId *string
+
+	// A secret string of 43-64 characters. Valid characters are a-z, A-Z, 0-9, /, +,
+	// and =.
+	//
+	// This member is required.
+	RawSecretAccessKey *string
+
+	noSmithyDocumentSerde
+}
+
+// Detailed information about the external key store proxy (XKS proxy). Your
+// external key store proxy translates KMS requests into a format that your
+// external key manager can understand. These fields appear in a
+// DescribeCustomKeyStores response only when the CustomKeyStoreType is
+// EXTERNAL_KEY_STORE.
+type XksProxyConfigurationType struct {
+
+	// The part of the external key store proxy authentication credential
+	// (https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateCustomKeyStore.html#KMS-CreateCustomKeyStore-request-XksProxyAuthenticationCredential)
+	// that uniquely identifies the secret access key.
+	AccessKeyId *string
+
+	// Indicates whether the external key store proxy uses a public endpoint or an
+	// Amazon VPC endpoint service to communicate with KMS.
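+	// (Editorial note, not part of the generated AWS documentation: these two
+	// modes correspond to the XksProxyConnectivity values PUBLIC_ENDPOINT and
+	// VPC_ENDPOINT_SERVICE in the KMS API.)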
+ Connectivity XksProxyConnectivityType + + // The URI endpoint for the external key store proxy. If the external key store + // proxy has a public endpoint, it is displayed here. If the external key store + // proxy uses an Amazon VPC endpoint service name, this field displays the private + // DNS name associated with the VPC endpoint service. + UriEndpoint *string + + // The path to the external key store proxy APIs. + UriPath *string + + // The Amazon VPC endpoint service used to communicate with the external key store + // proxy. This field appears only when the external key store proxy uses an Amazon + // VPC endpoint service to communicate with KMS. + VpcEndpointServiceName *string + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/validators.go new file mode 100644 index 00000000000..8f724d4b6af --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/validators.go @@ -0,0 +1,1933 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpCancelKeyDeletion struct { +} + +func (*validateOpCancelKeyDeletion) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCancelKeyDeletion) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CancelKeyDeletionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCancelKeyDeletionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpConnectCustomKeyStore struct { +} + +func (*validateOpConnectCustomKeyStore) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpConnectCustomKeyStore) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ConnectCustomKeyStoreInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpConnectCustomKeyStoreInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateAlias struct { +} + +func (*validateOpCreateAlias) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateAlias) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateAliasInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateAliasInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateCustomKeyStore struct { +} + +func (*validateOpCreateCustomKeyStore) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateCustomKeyStore) HandleInitialize(ctx 
context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateCustomKeyStoreInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateCustomKeyStoreInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateGrant struct { +} + +func (*validateOpCreateGrant) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateGrant) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateGrantInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateGrantInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateKey struct { +} + +func (*validateOpCreateKey) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateKey) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateKeyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateKeyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDecrypt struct { +} + +func (*validateOpDecrypt) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDecrypt) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DecryptInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDecryptInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteAlias struct { +} + +func (*validateOpDeleteAlias) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteAlias) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteAliasInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteAliasInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteCustomKeyStore struct { +} + +func (*validateOpDeleteCustomKeyStore) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteCustomKeyStore) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteCustomKeyStoreInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := 
validateOpDeleteCustomKeyStoreInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteImportedKeyMaterial struct { +} + +func (*validateOpDeleteImportedKeyMaterial) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteImportedKeyMaterial) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteImportedKeyMaterialInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteImportedKeyMaterialInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeKey struct { +} + +func (*validateOpDescribeKey) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeKey) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeKeyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeKeyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDisableKey struct { +} + +func (*validateOpDisableKey) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDisableKey) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DisableKeyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDisableKeyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDisableKeyRotation struct { +} + +func (*validateOpDisableKeyRotation) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDisableKeyRotation) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DisableKeyRotationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDisableKeyRotationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDisconnectCustomKeyStore struct { +} + +func (*validateOpDisconnectCustomKeyStore) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDisconnectCustomKeyStore) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DisconnectCustomKeyStoreInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDisconnectCustomKeyStoreInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpEnableKey struct { +} + +func (*validateOpEnableKey) ID() string 
{ + return "OperationInputValidation" +} + +func (m *validateOpEnableKey) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*EnableKeyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpEnableKeyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpEnableKeyRotation struct { +} + +func (*validateOpEnableKeyRotation) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpEnableKeyRotation) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*EnableKeyRotationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpEnableKeyRotationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpEncrypt struct { +} + +func (*validateOpEncrypt) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpEncrypt) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*EncryptInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpEncryptInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGenerateDataKey struct { +} + +func (*validateOpGenerateDataKey) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGenerateDataKey) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GenerateDataKeyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGenerateDataKeyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGenerateDataKeyPair struct { +} + +func (*validateOpGenerateDataKeyPair) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGenerateDataKeyPair) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GenerateDataKeyPairInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGenerateDataKeyPairInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGenerateDataKeyPairWithoutPlaintext struct { +} + +func (*validateOpGenerateDataKeyPairWithoutPlaintext) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGenerateDataKeyPairWithoutPlaintext) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata 
middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GenerateDataKeyPairWithoutPlaintextInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGenerateDataKeyPairWithoutPlaintextInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGenerateDataKeyWithoutPlaintext struct { +} + +func (*validateOpGenerateDataKeyWithoutPlaintext) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGenerateDataKeyWithoutPlaintext) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GenerateDataKeyWithoutPlaintextInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGenerateDataKeyWithoutPlaintextInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGenerateMac struct { +} + +func (*validateOpGenerateMac) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGenerateMac) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GenerateMacInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGenerateMacInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetKeyPolicy struct { +} + +func (*validateOpGetKeyPolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetKeyPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetKeyPolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetKeyPolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetKeyRotationStatus struct { +} + +func (*validateOpGetKeyRotationStatus) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetKeyRotationStatus) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetKeyRotationStatusInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetKeyRotationStatusInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetParametersForImport struct { +} + +func (*validateOpGetParametersForImport) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetParametersForImport) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetParametersForImportInput) + if !ok { + return out, metadata, 
fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetParametersForImportInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetPublicKey struct { +} + +func (*validateOpGetPublicKey) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetPublicKey) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetPublicKeyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetPublicKeyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpImportKeyMaterial struct { +} + +func (*validateOpImportKeyMaterial) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpImportKeyMaterial) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ImportKeyMaterialInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpImportKeyMaterialInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListGrants struct { +} + +func (*validateOpListGrants) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListGrants) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListGrantsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListGrantsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListKeyPolicies struct { +} + +func (*validateOpListKeyPolicies) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListKeyPolicies) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListKeyPoliciesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListKeyPoliciesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListResourceTags struct { +} + +func (*validateOpListResourceTags) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListResourceTags) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListResourceTagsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListResourceTagsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListRetirableGrants struct { +} + +func (*validateOpListRetirableGrants) 
ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListRetirableGrants) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListRetirableGrantsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListRetirableGrantsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutKeyPolicy struct { +} + +func (*validateOpPutKeyPolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutKeyPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutKeyPolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutKeyPolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpReEncrypt struct { +} + +func (*validateOpReEncrypt) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpReEncrypt) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ReEncryptInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpReEncryptInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpReplicateKey struct { +} + +func (*validateOpReplicateKey) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpReplicateKey) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ReplicateKeyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpReplicateKeyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpRevokeGrant struct { +} + +func (*validateOpRevokeGrant) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpRevokeGrant) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*RevokeGrantInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpRevokeGrantInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpScheduleKeyDeletion struct { +} + +func (*validateOpScheduleKeyDeletion) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpScheduleKeyDeletion) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ScheduleKeyDeletionInput) 
+ if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpScheduleKeyDeletionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpSign struct { +} + +func (*validateOpSign) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpSign) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*SignInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpSignInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpTagResource struct { +} + +func (*validateOpTagResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpTagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*TagResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpTagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUntagResource struct { +} + +func (*validateOpUntagResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUntagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UntagResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUntagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateAlias struct { +} + +func (*validateOpUpdateAlias) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateAlias) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateAliasInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateAliasInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateCustomKeyStore struct { +} + +func (*validateOpUpdateCustomKeyStore) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateCustomKeyStore) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateCustomKeyStoreInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateCustomKeyStoreInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateKeyDescription struct { +} + +func (*validateOpUpdateKeyDescription) ID() string { + return 
"OperationInputValidation" +} + +func (m *validateOpUpdateKeyDescription) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateKeyDescriptionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateKeyDescriptionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdatePrimaryRegion struct { +} + +func (*validateOpUpdatePrimaryRegion) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdatePrimaryRegion) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdatePrimaryRegionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdatePrimaryRegionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpVerify struct { +} + +func (*validateOpVerify) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpVerify) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*VerifyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpVerifyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpVerifyMac struct { +} + +func (*validateOpVerifyMac) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpVerifyMac) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*VerifyMacInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpVerifyMacInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpCancelKeyDeletionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCancelKeyDeletion{}, middleware.After) +} + +func addOpConnectCustomKeyStoreValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpConnectCustomKeyStore{}, middleware.After) +} + +func addOpCreateAliasValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateAlias{}, middleware.After) +} + +func addOpCreateCustomKeyStoreValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateCustomKeyStore{}, middleware.After) +} + +func addOpCreateGrantValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateGrant{}, middleware.After) +} + +func addOpCreateKeyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateKey{}, middleware.After) +} + +func addOpDecryptValidationMiddleware(stack *middleware.Stack) error { + return 
stack.Initialize.Add(&validateOpDecrypt{}, middleware.After) +} + +func addOpDeleteAliasValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteAlias{}, middleware.After) +} + +func addOpDeleteCustomKeyStoreValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteCustomKeyStore{}, middleware.After) +} + +func addOpDeleteImportedKeyMaterialValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteImportedKeyMaterial{}, middleware.After) +} + +func addOpDescribeKeyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeKey{}, middleware.After) +} + +func addOpDisableKeyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDisableKey{}, middleware.After) +} + +func addOpDisableKeyRotationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDisableKeyRotation{}, middleware.After) +} + +func addOpDisconnectCustomKeyStoreValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDisconnectCustomKeyStore{}, middleware.After) +} + +func addOpEnableKeyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpEnableKey{}, middleware.After) +} + +func addOpEnableKeyRotationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpEnableKeyRotation{}, middleware.After) +} + +func addOpEncryptValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpEncrypt{}, middleware.After) +} + +func addOpGenerateDataKeyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGenerateDataKey{}, middleware.After) +} + +func addOpGenerateDataKeyPairValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGenerateDataKeyPair{}, middleware.After) +} + +func addOpGenerateDataKeyPairWithoutPlaintextValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGenerateDataKeyPairWithoutPlaintext{}, middleware.After) +} + +func addOpGenerateDataKeyWithoutPlaintextValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGenerateDataKeyWithoutPlaintext{}, middleware.After) +} + +func addOpGenerateMacValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGenerateMac{}, middleware.After) +} + +func addOpGetKeyPolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetKeyPolicy{}, middleware.After) +} + +func addOpGetKeyRotationStatusValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetKeyRotationStatus{}, middleware.After) +} + +func addOpGetParametersForImportValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetParametersForImport{}, middleware.After) +} + +func addOpGetPublicKeyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetPublicKey{}, middleware.After) +} + +func addOpImportKeyMaterialValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpImportKeyMaterial{}, middleware.After) +} + +func addOpListGrantsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListGrants{}, 
middleware.After) +} + +func addOpListKeyPoliciesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListKeyPolicies{}, middleware.After) +} + +func addOpListResourceTagsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListResourceTags{}, middleware.After) +} + +func addOpListRetirableGrantsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListRetirableGrants{}, middleware.After) +} + +func addOpPutKeyPolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutKeyPolicy{}, middleware.After) +} + +func addOpReEncryptValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpReEncrypt{}, middleware.After) +} + +func addOpReplicateKeyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpReplicateKey{}, middleware.After) +} + +func addOpRevokeGrantValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpRevokeGrant{}, middleware.After) +} + +func addOpScheduleKeyDeletionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpScheduleKeyDeletion{}, middleware.After) +} + +func addOpSignValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpSign{}, middleware.After) +} + +func addOpTagResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpTagResource{}, middleware.After) +} + +func addOpUntagResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUntagResource{}, middleware.After) +} + +func addOpUpdateAliasValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateAlias{}, middleware.After) +} + +func addOpUpdateCustomKeyStoreValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateCustomKeyStore{}, middleware.After) +} + +func addOpUpdateKeyDescriptionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateKeyDescription{}, middleware.After) +} + +func addOpUpdatePrimaryRegionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdatePrimaryRegion{}, middleware.After) +} + +func addOpVerifyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpVerify{}, middleware.After) +} + +func addOpVerifyMacValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpVerifyMac{}, middleware.After) +} + +func validateTag(v *types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Tag"} + if v.TagKey == nil { + invalidParams.Add(smithy.NewErrParamRequired("TagKey")) + } + if v.TagValue == nil { + invalidParams.Add(smithy.NewErrParamRequired("TagValue")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTagList(v []types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagList"} + for i := range v { + if err := validateTag(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func 
validateXksProxyAuthenticationCredentialType(v *types.XksProxyAuthenticationCredentialType) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "XksProxyAuthenticationCredentialType"} + if v.AccessKeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessKeyId")) + } + if v.RawSecretAccessKey == nil { + invalidParams.Add(smithy.NewErrParamRequired("RawSecretAccessKey")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCancelKeyDeletionInput(v *CancelKeyDeletionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CancelKeyDeletionInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpConnectCustomKeyStoreInput(v *ConnectCustomKeyStoreInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ConnectCustomKeyStoreInput"} + if v.CustomKeyStoreId == nil { + invalidParams.Add(smithy.NewErrParamRequired("CustomKeyStoreId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateAliasInput(v *CreateAliasInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateAliasInput"} + if v.AliasName == nil { + invalidParams.Add(smithy.NewErrParamRequired("AliasName")) + } + if v.TargetKeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("TargetKeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateCustomKeyStoreInput(v *CreateCustomKeyStoreInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateCustomKeyStoreInput"} + if v.CustomKeyStoreName == nil { + invalidParams.Add(smithy.NewErrParamRequired("CustomKeyStoreName")) + } + if v.XksProxyAuthenticationCredential != nil { + if err := validateXksProxyAuthenticationCredentialType(v.XksProxyAuthenticationCredential); err != nil { + invalidParams.AddNested("XksProxyAuthenticationCredential", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateGrantInput(v *CreateGrantInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateGrantInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.GranteePrincipal == nil { + invalidParams.Add(smithy.NewErrParamRequired("GranteePrincipal")) + } + if v.Operations == nil { + invalidParams.Add(smithy.NewErrParamRequired("Operations")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateKeyInput(v *CreateKeyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateKeyInput"} + if v.Tags != nil { + if err := validateTagList(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDecryptInput(v *DecryptInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DecryptInput"} + if v.CiphertextBlob == nil { + invalidParams.Add(smithy.NewErrParamRequired("CiphertextBlob")) + } + if 
invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteAliasInput(v *DeleteAliasInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteAliasInput"} + if v.AliasName == nil { + invalidParams.Add(smithy.NewErrParamRequired("AliasName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteCustomKeyStoreInput(v *DeleteCustomKeyStoreInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteCustomKeyStoreInput"} + if v.CustomKeyStoreId == nil { + invalidParams.Add(smithy.NewErrParamRequired("CustomKeyStoreId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteImportedKeyMaterialInput(v *DeleteImportedKeyMaterialInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteImportedKeyMaterialInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeKeyInput(v *DescribeKeyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeKeyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDisableKeyInput(v *DisableKeyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DisableKeyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDisableKeyRotationInput(v *DisableKeyRotationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DisableKeyRotationInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDisconnectCustomKeyStoreInput(v *DisconnectCustomKeyStoreInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DisconnectCustomKeyStoreInput"} + if v.CustomKeyStoreId == nil { + invalidParams.Add(smithy.NewErrParamRequired("CustomKeyStoreId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpEnableKeyInput(v *EnableKeyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "EnableKeyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpEnableKeyRotationInput(v *EnableKeyRotationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "EnableKeyRotationInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpEncryptInput(v *EncryptInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "EncryptInput"} + if v.KeyId == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.Plaintext == nil { + invalidParams.Add(smithy.NewErrParamRequired("Plaintext")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGenerateDataKeyInput(v *GenerateDataKeyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GenerateDataKeyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGenerateDataKeyPairInput(v *GenerateDataKeyPairInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GenerateDataKeyPairInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if len(v.KeyPairSpec) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("KeyPairSpec")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGenerateDataKeyPairWithoutPlaintextInput(v *GenerateDataKeyPairWithoutPlaintextInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GenerateDataKeyPairWithoutPlaintextInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if len(v.KeyPairSpec) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("KeyPairSpec")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGenerateDataKeyWithoutPlaintextInput(v *GenerateDataKeyWithoutPlaintextInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GenerateDataKeyWithoutPlaintextInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGenerateMacInput(v *GenerateMacInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GenerateMacInput"} + if v.Message == nil { + invalidParams.Add(smithy.NewErrParamRequired("Message")) + } + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if len(v.MacAlgorithm) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("MacAlgorithm")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetKeyPolicyInput(v *GetKeyPolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetKeyPolicyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.PolicyName == nil { + invalidParams.Add(smithy.NewErrParamRequired("PolicyName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetKeyRotationStatusInput(v *GetKeyRotationStatusInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetKeyRotationStatusInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetParametersForImportInput(v *GetParametersForImportInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetParametersForImportInput"} + if v.KeyId == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if len(v.WrappingAlgorithm) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("WrappingAlgorithm")) + } + if len(v.WrappingKeySpec) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("WrappingKeySpec")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetPublicKeyInput(v *GetPublicKeyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetPublicKeyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpImportKeyMaterialInput(v *ImportKeyMaterialInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ImportKeyMaterialInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.ImportToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("ImportToken")) + } + if v.EncryptedKeyMaterial == nil { + invalidParams.Add(smithy.NewErrParamRequired("EncryptedKeyMaterial")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListGrantsInput(v *ListGrantsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListGrantsInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListKeyPoliciesInput(v *ListKeyPoliciesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListKeyPoliciesInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListResourceTagsInput(v *ListResourceTagsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListResourceTagsInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListRetirableGrantsInput(v *ListRetirableGrantsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListRetirableGrantsInput"} + if v.RetiringPrincipal == nil { + invalidParams.Add(smithy.NewErrParamRequired("RetiringPrincipal")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutKeyPolicyInput(v *PutKeyPolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutKeyPolicyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.PolicyName == nil { + invalidParams.Add(smithy.NewErrParamRequired("PolicyName")) + } + if v.Policy == nil { + invalidParams.Add(smithy.NewErrParamRequired("Policy")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpReEncryptInput(v *ReEncryptInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReEncryptInput"} + if v.CiphertextBlob == nil { + invalidParams.Add(smithy.NewErrParamRequired("CiphertextBlob")) + } + if v.DestinationKeyId == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("DestinationKeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpReplicateKeyInput(v *ReplicateKeyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicateKeyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.ReplicaRegion == nil { + invalidParams.Add(smithy.NewErrParamRequired("ReplicaRegion")) + } + if v.Tags != nil { + if err := validateTagList(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpRevokeGrantInput(v *RevokeGrantInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RevokeGrantInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.GrantId == nil { + invalidParams.Add(smithy.NewErrParamRequired("GrantId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpScheduleKeyDeletionInput(v *ScheduleKeyDeletionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ScheduleKeyDeletionInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpSignInput(v *SignInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SignInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.Message == nil { + invalidParams.Add(smithy.NewErrParamRequired("Message")) + } + if len(v.SigningAlgorithm) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("SigningAlgorithm")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpTagResourceInput(v *TagResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagResourceInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.Tags == nil { + invalidParams.Add(smithy.NewErrParamRequired("Tags")) + } else if v.Tags != nil { + if err := validateTagList(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUntagResourceInput(v *UntagResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UntagResourceInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.TagKeys == nil { + invalidParams.Add(smithy.NewErrParamRequired("TagKeys")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateAliasInput(v *UpdateAliasInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateAliasInput"} + if v.AliasName == nil { + invalidParams.Add(smithy.NewErrParamRequired("AliasName")) + } + if v.TargetKeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("TargetKeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateCustomKeyStoreInput(v 
*UpdateCustomKeyStoreInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateCustomKeyStoreInput"} + if v.CustomKeyStoreId == nil { + invalidParams.Add(smithy.NewErrParamRequired("CustomKeyStoreId")) + } + if v.XksProxyAuthenticationCredential != nil { + if err := validateXksProxyAuthenticationCredentialType(v.XksProxyAuthenticationCredential); err != nil { + invalidParams.AddNested("XksProxyAuthenticationCredential", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateKeyDescriptionInput(v *UpdateKeyDescriptionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateKeyDescriptionInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.Description == nil { + invalidParams.Add(smithy.NewErrParamRequired("Description")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdatePrimaryRegionInput(v *UpdatePrimaryRegionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdatePrimaryRegionInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.PrimaryRegion == nil { + invalidParams.Add(smithy.NewErrParamRequired("PrimaryRegion")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpVerifyInput(v *VerifyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "VerifyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.Message == nil { + invalidParams.Add(smithy.NewErrParamRequired("Message")) + } + if v.Signature == nil { + invalidParams.Add(smithy.NewErrParamRequired("Signature")) + } + if len(v.SigningAlgorithm) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("SigningAlgorithm")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpVerifyMacInput(v *VerifyMacInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "VerifyMacInput"} + if v.Message == nil { + invalidParams.Add(smithy.NewErrParamRequired("Message")) + } + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if len(v.MacAlgorithm) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("MacAlgorithm")) + } + if v.Mac == nil { + invalidParams.Add(smithy.NewErrParamRequired("Mac")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md index 2f8860d2f2f..49b4e31d6bb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.11.26 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.11.25 (2022-10-24) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go index e2de3ea3154..cbfe45ee1ab 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -3,4 +3,4 @@ package sso // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.11.25" +const goModuleVersion = "1.11.26" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md index 4245e8d9fd0..b3b019177d8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.13.9 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.13.8 (2022-10-24) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go index 9c79d16f414..a5a50c97fa9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go @@ -3,4 +3,4 @@ package ssooidc // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.13.8" +const goModuleVersion = "1.13.9" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md index 6255c0bc5d9..106016915f4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.17.6 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.17.5 (2022-11-22) * No change notes available for this release. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go index 9e6b85cc418..ae6f9e766dd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -3,4 +3,4 @@ package sts // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.17.5" +const goModuleVersion = "1.17.6" diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md index 41bbcfac3a9..1e23bf95b30 100644 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -1,3 +1,7 @@ +# Release (2022-12-02) + +* No change notes available for this release. 
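Aside for reviewers: the `service/kms/validators.go` file added earlier in this patch repeats one generated pattern per operation. An Initialize-step middleware type-asserts the operation input, then a `validateOp*Input` helper nil-checks every required member and aggregates failures into a `smithy.InvalidParamsError`, so a request with missing parameters fails locally before anything is sent to KMS. The following dependency-free sketch distills that pattern; `invalidParams`, `decryptInput`, and `validateDecryptInput` are hypothetical stand-ins, not part of the vendored SDK, which uses the real `smithy.InvalidParamsError` and generated input types instead.

```go
package main

import "fmt"

// invalidParams loosely mirrors smithy.InvalidParamsError from the vendored
// github.com/aws/smithy-go: it aggregates every missing required member of
// one input shape so a single error reports them all.
type invalidParams struct {
	context string
	missing []string
}

func (e *invalidParams) Error() string {
	return fmt.Sprintf("%d validation error(s) on %s, missing: %v",
		len(e.missing), e.context, e.missing)
}

// decryptInput is a hypothetical stand-in for the generated DecryptInput.
type decryptInput struct {
	CiphertextBlob []byte
	KeyId          *string // optional; KMS only needs it for asymmetric ciphertext
}

// validateDecryptInput follows the generated validateOp*Input shape: a nil
// input passes through, required members are nil-checked, and all failures
// are accumulated before a single error is returned.
func validateDecryptInput(v *decryptInput) error {
	if v == nil {
		return nil
	}
	e := &invalidParams{context: "DecryptInput"}
	if v.CiphertextBlob == nil {
		e.missing = append(e.missing, "CiphertextBlob")
	}
	if len(e.missing) > 0 {
		return e
	}
	return nil
}

func main() {
	// Missing CiphertextBlob: fails fast, no request would be sent.
	fmt.Println(validateDecryptInput(&decryptInput{}))
	// Required member present: validation passes and prints <nil>.
	fmt.Println(validateDecryptInput(&decryptInput{CiphertextBlob: []byte("ct")}))
}
```

Note that, matching the vendored `validateOpDecryptInput`, only `CiphertextBlob` is required for Decrypt; the generated middleware wires each such validator into the client's Initialize stack via the `addOp*ValidationMiddleware` functions shown above.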
+ # Release (2022-10-24) ## Module Highlights diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md index 789b3788960..a4bb43fbe9b 100644 --- a/vendor/github.com/aws/smithy-go/README.md +++ b/vendor/github.com/aws/smithy-go/README.md @@ -2,7 +2,7 @@ [![Go Build Status](https://github.com/aws/smithy-go/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/go.yml)[![Codegen Build Status](https://github.com/aws/smithy-go/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/codegen.yml) -Smithy code generators for Go. +[Smithy](https://smithy.io/) code generators for Go. **WARNING: All interfaces are subject to change.** diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/doc.go b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go index d6e1e41e164..f9200093e87 100644 --- a/vendor/github.com/aws/smithy-go/encoding/xml/doc.go +++ b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go @@ -2,7 +2,7 @@ Package xml holds the XMl encoder utility. This utility is written in accordance to our design to delegate to shape serializer function in which a xml.Value will be passed around. -Resources followed: https://awslabs.github.io/smithy/1.0/spec/core/xml-traits.html# +Resources followed: https://smithy.io/2.0/spec/protocol-traits.html#xml-bindings Member Element diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go index 4ed5881885b..8eaac41e7ac 100644 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -3,4 +3,4 @@ package smithy // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.13.4" +const goModuleVersion = "1.13.5" diff --git a/vendor/github.com/cenkalti/backoff/v3/.gitignore b/vendor/github.com/cenkalti/backoff/v3/.gitignore new file mode 100644 index 00000000000..00268614f04 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/cenkalti/backoff/v3/.travis.yml b/vendor/github.com/cenkalti/backoff/v3/.travis.yml new file mode 100644 index 00000000000..47a6a46ec2a --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/.travis.yml @@ -0,0 +1,10 @@ +language: go +go: + - 1.7 + - 1.x + - tip +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/cenkalti/backoff/v3/LICENSE b/vendor/github.com/cenkalti/backoff/v3/LICENSE new file mode 100644 index 00000000000..89b81799655 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without 
limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenkalti/backoff/v3/README.md b/vendor/github.com/cenkalti/backoff/v3/README.md new file mode 100644 index 00000000000..3673df487f9 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/README.md @@ -0,0 +1,33 @@ +# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] + +This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. + +[Exponential backoff][exponential backoff wiki] +is an algorithm that uses feedback to multiplicatively decrease the rate of some process, +in order to gradually find an acceptable rate. +The retries exponentially increase and stop increasing when a certain threshold is met. + +## Usage + +Import path is `github.com/cenkalti/backoff/v3`. Please note the version part at the end. + +godoc.org does not support modules yet, +so you can use https://godoc.org/gopkg.in/cenkalti/backoff.v3 to view the documentation. + +## Contributing + +* I would like to keep this library as small as possible. +* Please don't send a PR without opening an issue and discussing it first. +* If proposed change is not a common use case, I will probably not accept it. + +[godoc]: https://godoc.org/github.com/cenkalti/backoff +[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png +[travis]: https://travis-ci.org/cenkalti/backoff +[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master +[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master +[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master + +[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java +[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff + +[advanced example]: https://godoc.org/github.com/cenkalti/backoff#example_ diff --git a/vendor/github.com/cenkalti/backoff/v3/backoff.go b/vendor/github.com/cenkalti/backoff/v3/backoff.go new file mode 100644 index 00000000000..3676ee405d8 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/backoff.go @@ -0,0 +1,66 @@ +// Package backoff implements backoff algorithms for retrying operations. +// +// Use Retry function for retrying operations that may fail. +// If Retry does not meet your needs, +// copy/paste the function into your project and modify as you wish. +// +// There is also Ticker type similar to time.Ticker. 
+// You can use it if you need to work with channels. +// +// See Examples section below for usage examples. +package backoff + +import "time" + +// BackOff is a backoff policy for retrying an operation. +type BackOff interface { + // NextBackOff returns the duration to wait before retrying the operation, + // or backoff. Stop to indicate that no more retries should be made. + // + // Example usage: + // + // duration := backoff.NextBackOff(); + // if (duration == backoff.Stop) { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } + // + NextBackOff() time.Duration + + // Reset to initial state. + Reset() +} + +// Stop indicates that no more retries should be made for use in NextBackOff(). +const Stop time.Duration = -1 + +// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, +// meaning that the operation is retried immediately without waiting, indefinitely. +type ZeroBackOff struct{} + +func (b *ZeroBackOff) Reset() {} + +func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } + +// StopBackOff is a fixed backoff policy that always returns backoff.Stop for +// NextBackOff(), meaning that the operation should never be retried. +type StopBackOff struct{} + +func (b *StopBackOff) Reset() {} + +func (b *StopBackOff) NextBackOff() time.Duration { return Stop } + +// ConstantBackOff is a backoff policy that always returns the same backoff delay. +// This is in contrast to an exponential backoff policy, +// which returns a delay that grows longer as you call NextBackOff() over and over again. +type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/vendor/github.com/cenkalti/backoff/v3/context.go b/vendor/github.com/cenkalti/backoff/v3/context.go new file mode 100644 index 00000000000..fcff86c1b3d --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/context.go @@ -0,0 +1,66 @@ +package backoff + +import ( + "context" + "time" +) + +// BackOffContext is a backoff policy that stops retrying after the context +// is canceled. 
+type BackOffContext interface { // nolint: golint + BackOff + Context() context.Context +} + +type backOffContext struct { + BackOff + ctx context.Context +} + +// WithContext returns a BackOffContext with context ctx +// +// ctx must not be nil +func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint + if ctx == nil { + panic("nil context") + } + + if b, ok := b.(*backOffContext); ok { + return &backOffContext{ + BackOff: b.BackOff, + ctx: ctx, + } + } + + return &backOffContext{ + BackOff: b, + ctx: ctx, + } +} + +func getContext(b BackOff) context.Context { + if cb, ok := b.(BackOffContext); ok { + return cb.Context() + } + if tb, ok := b.(*backOffTries); ok { + return getContext(tb.delegate) + } + return context.Background() +} + +func (b *backOffContext) Context() context.Context { + return b.ctx +} + +func (b *backOffContext) NextBackOff() time.Duration { + select { + case <-b.ctx.Done(): + return Stop + default: + } + next := b.BackOff.NextBackOff() + if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next { // nolint: gosimple + return Stop + } + return next +} diff --git a/vendor/github.com/cenkalti/backoff/v3/exponential.go b/vendor/github.com/cenkalti/backoff/v3/exponential.go new file mode 100644 index 00000000000..cb11cc1d21e --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/exponential.go @@ -0,0 +1,154 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). + +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. +*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + // After MaxElapsedTime the ExponentialBackOff stops. + // It never stops if MaxElapsedTime == 0. + MaxElapsedTime time.Duration + Clock Clock + + currentInterval time.Duration + startTime time.Time +} + +// Clock is an interface that returns current time for BackOff. +type Clock interface { + Now() time.Time +} + +// Default values for ExponentialBackOff. 
+const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second + DefaultMaxElapsedTime = 15 * time.Minute +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff() *ExponentialBackOff { + b := &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + MaxElapsedTime: DefaultMaxElapsedTime, + Clock: SystemClock, + } + b.Reset() + return b +} + +type systemClock struct{} + +func (t systemClock) Now() time.Time { + return time.Now() +} + +// SystemClock implements Clock interface that uses time.Now(). +var SystemClock = systemClock{} + +// Reset the interval back to the initial retry interval and restarts the timer. +// Reset must be called before using b. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval + b.startTime = b.Clock.Now() +} + +// NextBackOff calculates the next backoff interval using the formula: +// Randomized interval = RetryInterval * (1 ± RandomizationFactor) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + // Make sure we have not gone over the maximum elapsed time. + if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime { + return Stop + } + defer b.incrementCurrentInterval() + return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) +} + +// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance +// is created and is reset when Reset() is called. +// +// The elapsed time is computed using time.Now().UnixNano(). It is +// safe to call even while the backoff policy is used by a running +// ticker. +func (b *ExponentialBackOff) GetElapsedTime() time.Duration { + return b.Clock.Now().Sub(b.startTime) +} + +// Increments the current interval by multiplying it with the multiplier. +func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. + if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// [randomizationFactor * currentInterval, randomizationFactor * currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. + // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/vendor/github.com/cenkalti/backoff/v3/retry.go b/vendor/github.com/cenkalti/backoff/v3/retry.go new file mode 100644 index 00000000000..6c776ccf8ed --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/retry.go @@ -0,0 +1,96 @@ +package backoff + +import "time" + +// An Operation is executing by Retry() or RetryNotify(). 
+// The operation will be retried using a backoff policy if it returns an error. +type Operation func() error + +// Notify is a notify-on-error function. It receives an operation error and +// backoff delay if the operation failed (with an error). +// +// NOTE that if the backoff policy stated to stop retrying, +// the notify function isn't called. +type Notify func(error, time.Duration) + +// Retry the operation o until it does not return error or BackOff stops. +// o is guaranteed to be run at least once. +// +// If o returns a *PermanentError, the operation is not retried, and the +// wrapped error is returned. +// +// Retry sleeps the goroutine for the duration returned by BackOff after a +// failed operation returns. +func Retry(o Operation, b BackOff) error { + return RetryNotify(o, b, nil) +} + +// RetryNotify calls notify function with the error and wait duration +// for each failed attempt before sleep. +func RetryNotify(operation Operation, b BackOff, notify Notify) error { + return RetryNotifyWithTimer(operation, b, notify, nil) +} + +// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer +// for each failed attempt before sleep. +// A default timer that uses system timer is used when nil is passed. +func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { + var err error + var next time.Duration + if t == nil { + t = &defaultTimer{} + } + + defer func() { + t.Stop() + }() + + ctx := getContext(b) + + b.Reset() + for { + if err = operation(); err == nil { + return nil + } + + if permanent, ok := err.(*PermanentError); ok { + return permanent.Err + } + + if next = b.NextBackOff(); next == Stop { + return err + } + + if notify != nil { + notify(err, next) + } + + t.Start(next) + + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.C(): + } + } +} + +// PermanentError signals that the operation should not be retried. +type PermanentError struct { + Err error +} + +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +func (e *PermanentError) Unwrap() error { + return e.Err +} + +// Permanent wraps the given err in a *PermanentError. +func Permanent(err error) *PermanentError { + return &PermanentError{ + Err: err, + } +} diff --git a/vendor/github.com/cenkalti/backoff/v3/ticker.go b/vendor/github.com/cenkalti/backoff/v3/ticker.go new file mode 100644 index 00000000000..ed699e0e300 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/ticker.go @@ -0,0 +1,94 @@ +package backoff + +import ( + "context" + "sync" + "time" +) + +// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. +// +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. +type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOff + ctx context.Context + timer Timer + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send +// the time at times specified by the BackOff argument. Ticker is +// guaranteed to tick at least once. The channel is closed when Stop +// method is called or BackOff stops. It is not safe to manipulate the +// provided backoff policy (notably calling NextBackOff or Reset) +// while the ticker is running. +func NewTicker(b BackOff) *Ticker { + return NewTickerWithTimer(b, &defaultTimer{}) +} + +// NewTickerWithTimer returns a new Ticker with a custom timer. 
+// A default timer that uses system timer is used when nil is passed. +func NewTickerWithTimer(b BackOff, timer Timer) *Ticker { + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: b, + ctx: getContext(b), + timer: timer, + stop: make(chan struct{}), + } + t.b.Reset() + go t.run() + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. + return + case <-t.ctx.Done(): + return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + t.timer.Start(next) + return t.timer.C() +} diff --git a/vendor/github.com/cenkalti/backoff/v3/timer.go b/vendor/github.com/cenkalti/backoff/v3/timer.go new file mode 100644 index 00000000000..8120d0213c5 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/timer.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +type Timer interface { + Start(duration time.Duration) + Stop() + C() <-chan time.Time +} + +// defaultTimer implements Timer interface using time.Timer +type defaultTimer struct { + timer *time.Timer +} + +// C returns the timers channel which receives the current time when the timer fires. +func (t *defaultTimer) C() <-chan time.Time { + return t.timer.C +} + +// Start starts the timer to fire after the given duration +func (t *defaultTimer) Start(duration time.Duration) { + if t.timer == nil { + t.timer = time.NewTimer(duration) + } else { + t.timer.Reset(duration) + } +} + +// Stop is called when the timer is not used anymore and resources may be freed. +func (t *defaultTimer) Stop() { + if t.timer != nil { + t.timer.Stop() + } +} diff --git a/vendor/github.com/cenkalti/backoff/v3/tries.go b/vendor/github.com/cenkalti/backoff/v3/tries.go new file mode 100644 index 00000000000..cfeefd9b764 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/tries.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +/* +WithMaxRetries creates a wrapper around another BackOff, which will +return Stop if NextBackOff() has been called too many times since +the last time Reset() was called + +Note: Implementation is not thread-safe. 
+*/ +func WithMaxRetries(b BackOff, max uint64) BackOff { + return &backOffTries{delegate: b, maxTries: max} +} + +type backOffTries struct { + delegate BackOff + maxTries uint64 + numTries uint64 +} + +func (b *backOffTries) NextBackOff() time.Duration { + if b.maxTries > 0 { + if b.maxTries <= b.numTries { + return Stop + } + b.numTries++ + } + return b.delegate.NextBackOff() +} + +func (b *backOffTries) Reset() { + b.numTries = 0 + b.delegate.Reset() +} diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md new file mode 100644 index 00000000000..25fdaf639df --- /dev/null +++ b/vendor/github.com/fatih/color/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md new file mode 100644 index 00000000000..5152bf59bf8 --- /dev/null +++ b/vendor/github.com/fatih/color/README.md @@ -0,0 +1,178 @@ +# color [![](https://github.com/fatih/color/workflows/build/badge.svg)](https://github.com/fatih/color/actions) [![PkgGoDev](https://pkg.go.dev/badge/github.com/fatih/color)](https://pkg.go.dev/github.com/fatih/color) + +Color lets you use colorized outputs in terms of [ANSI Escape +Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It +has support for Windows too! The API can be used in several ways, pick one that +suits you. + +![Color](https://user-images.githubusercontent.com/438920/96832689-03b3e000-13f4-11eb-9803-46f4c4de3406.jpg) + + +## Install + +```bash +go get github.com/fatih/color +``` + +## Examples + +### Standard colors + +```go +// Print with default helper functions +color.Cyan("Prints text in cyan.") + +// A newline will be appended automatically +color.Blue("Prints %s in blue.", "text") + +// These are using the default foreground colors +color.Red("We have red") +color.Magenta("And many others ..") + +``` + +### Mix and reuse colors + +```go +// Create a new color object +c := color.New(color.FgCyan).Add(color.Underline) +c.Println("Prints cyan text with an underline.") + +// Or just add them to New() +d := color.New(color.FgCyan, color.Bold) +d.Printf("This prints bold cyan %s\n", "too!.") + +// Mix up foreground and background colors, create new mixes! 
+red := color.New(color.FgRed) + +boldRed := red.Add(color.Bold) +boldRed.Println("This will print text in bold red.") + +whiteBackground := red.Add(color.BgWhite) +whiteBackground.Println("Red text with white background.") +``` + +### Use your own output (io.Writer) + +```go +// Use your own io.Writer output +color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + +blue := color.New(color.FgBlue) +blue.Fprint(writer, "This will print text in blue.") +``` + +### Custom print functions (PrintFunc) + +```go +// Create a custom print function for convenience +red := color.New(color.FgRed).PrintfFunc() +red("Warning") +red("Error: %s", err) + +// Mix up multiple attributes +notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() +notice("Don't forget this...") +``` + +### Custom fprint functions (FprintFunc) + +```go +blue := color.New(color.FgBlue).FprintfFunc() +blue(myWriter, "important notice: %s", stars) + +// Mix up with multiple attributes +success := color.New(color.Bold, color.FgGreen).FprintlnFunc() +success(myWriter, "Don't forget this...") +``` + +### Insert into noncolor strings (SprintFunc) + +```go +// Create SprintXxx functions to mix strings with other non-colorized strings: +yellow := color.New(color.FgYellow).SprintFunc() +red := color.New(color.FgRed).SprintFunc() +fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error")) + +info := color.New(color.FgWhite, color.BgGreen).SprintFunc() +fmt.Printf("This %s rocks!\n", info("package")) + +// Use helper functions +fmt.Println("This", color.RedString("warning"), "should be not neglected.") +fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.") + +// Windows supported too! Just don't forget to change the output to color.Output +fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) +``` + +### Plug into existing code + +```go +// Use handy standard colors +color.Set(color.FgYellow) + +fmt.Println("Existing text will now be in yellow") +fmt.Printf("This one %s\n", "too") + +color.Unset() // Don't forget to unset + +// You can mix up parameters +color.Set(color.FgMagenta, color.Bold) +defer color.Unset() // Use it in your function + +fmt.Println("All text will now be bold magenta.") +``` + +### Disable/Enable color + +There might be a case where you want to explicitly disable/enable color output. the +`go-isatty` package will automatically disable color output for non-tty output streams +(for example if the output were piped directly to `less`). + +The `color` package also disables color output if the [`NO_COLOR`](https://no-color.org) environment +variable is set (regardless of its value). + +`Color` has support to disable/enable colors programatically both globally and +for single color definitions. For example suppose you have a CLI app and a +`--no-color` bool flag. You can easily disable the color output with: + +```go +var flagNoColor = flag.Bool("no-color", false, "Disable color output") + +if *flagNoColor { + color.NoColor = true // disables colorized output +} +``` + +It also has support for single color definitions (local). 
You can +disable/enable color output on the fly: + +```go +c := color.New(color.FgCyan) +c.Println("Prints cyan text") + +c.DisableColor() +c.Println("This is printed without any color") + +c.EnableColor() +c.Println("This prints again cyan...") +``` + +## GitHub Actions + +To output color in GitHub Actions (or other CI systems that support ANSI colors), make sure to set `color.NoColor = false` so that it bypasses the check for non-tty output streams. + +## Todo + +* Save/Return previous values +* Evaluate fmt.Formatter interface + + +## Credits + + * [Fatih Arslan](https://github.com/fatih) + * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) + +## License + +The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go new file mode 100644 index 00000000000..98a60f3c88d --- /dev/null +++ b/vendor/github.com/fatih/color/color.go @@ -0,0 +1,618 @@ +package color + +import ( + "fmt" + "io" + "os" + "strconv" + "strings" + "sync" + + "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +var ( + // NoColor defines if the output is colorized or not. It's dynamically set to + // false or true based on the stdout's file descriptor referring to a terminal + // or not. It's also set to true if the NO_COLOR environment variable is + // set (regardless of its value). This is a global option and affects all + // colors. For more control over each color block use the methods + // DisableColor() individually. + NoColor = noColorExists() || os.Getenv("TERM") == "dumb" || + (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) + + // Output defines the standard output of the print functions. By default + // os.Stdout is used. + Output = colorable.NewColorableStdout() + + // Error defines a color supporting writer for os.Stderr. + Error = colorable.NewColorableStderr() + + // colorsCache is used to reduce the count of created Color objects and + // allows to reuse already created objects with required Attribute. + colorsCache = make(map[Attribute]*Color) + colorsCacheMu sync.Mutex // protects colorsCache +) + +// noColorExists returns true if the environment variable NO_COLOR exists. +func noColorExists() bool { + _, exists := os.LookupEnv("NO_COLOR") + return exists +} + +// Color defines a custom color object which is defined by SGR parameters. +type Color struct { + params []Attribute + noColor *bool +} + +// Attribute defines a single SGR Code +type Attribute int + +const escape = "\x1b" + +// Base attributes +const ( + Reset Attribute = iota + Bold + Faint + Italic + Underline + BlinkSlow + BlinkRapid + ReverseVideo + Concealed + CrossedOut +) + +// Foreground text colors +const ( + FgBlack Attribute = iota + 30 + FgRed + FgGreen + FgYellow + FgBlue + FgMagenta + FgCyan + FgWhite +) + +// Foreground Hi-Intensity text colors +const ( + FgHiBlack Attribute = iota + 90 + FgHiRed + FgHiGreen + FgHiYellow + FgHiBlue + FgHiMagenta + FgHiCyan + FgHiWhite +) + +// Background text colors +const ( + BgBlack Attribute = iota + 40 + BgRed + BgGreen + BgYellow + BgBlue + BgMagenta + BgCyan + BgWhite +) + +// Background Hi-Intensity text colors +const ( + BgHiBlack Attribute = iota + 100 + BgHiRed + BgHiGreen + BgHiYellow + BgHiBlue + BgHiMagenta + BgHiCyan + BgHiWhite +) + +// New returns a newly created color object. 
+func New(value ...Attribute) *Color { + c := &Color{ + params: make([]Attribute, 0), + } + + if noColorExists() { + c.noColor = boolPtr(true) + } + + c.Add(value...) + return c +} + +// Set sets the given parameters immediately. It will change the color of +// output with the given SGR parameters until color.Unset() is called. +func Set(p ...Attribute) *Color { + c := New(p...) + c.Set() + return c +} + +// Unset resets all escape attributes and clears the output. Usually should +// be called after Set(). +func Unset() { + if NoColor { + return + } + + fmt.Fprintf(Output, "%s[%dm", escape, Reset) +} + +// Set sets the SGR sequence. +func (c *Color) Set() *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(Output, c.format()) + return c +} + +func (c *Color) unset() { + if c.isNoColorSet() { + return + } + + Unset() +} + +func (c *Color) setWriter(w io.Writer) *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(w, c.format()) + return c +} + +func (c *Color) unsetWriter(w io.Writer) { + if c.isNoColorSet() { + return + } + + if NoColor { + return + } + + fmt.Fprintf(w, "%s[%dm", escape, Reset) +} + +// Add is used to chain SGR parameters. Use as many as parameters to combine +// and create custom color objects. Example: Add(color.FgRed, color.Underline). +func (c *Color) Add(value ...Attribute) *Color { + c.params = append(c.params, value...) + return c +} + +func (c *Color) prepend(value Attribute) { + c.params = append(c.params, 0) + copy(c.params[1:], c.params[0:]) + c.params[0] = value +} + +// Fprint formats using the default formats for its operands and writes to w. +// Spaces are added between operands when neither is a string. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprint(w, a...) +} + +// Print formats using the default formats for its operands and writes to +// standard output. Spaces are added between operands when neither is a +// string. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Print(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprint(Output, a...) +} + +// Fprintf formats according to a format specifier and writes to w. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintf(w, format, a...) +} + +// Printf formats according to a format specifier and writes to standard output. +// It returns the number of bytes written and any write error encountered. +// This is the standard fmt.Printf() method wrapped with the given color. +func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintf(Output, format, a...) +} + +// Fprintln formats using the default formats for its operands and writes to w. +// Spaces are always added between operands and a newline is appended. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. 
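+// It returns the number of bytes written and any write error encountered.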
+func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintln(w, a...) +} + +// Println formats using the default formats for its operands and writes to +// standard output. Spaces are always added between operands and a newline is +// appended. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Println(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintln(Output, a...) +} + +// Sprint is just like Print, but returns a string instead of printing it. +func (c *Color) Sprint(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) +} + +// Sprintln is just like Println, but returns a string instead of printing it. +func (c *Color) Sprintln(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) +} + +// Sprintf is just like Printf, but returns a string instead of printing it. +func (c *Color) Sprintf(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) +} + +// FprintFunc returns a new function that prints the passed arguments as +// colorized with color.Fprint(). +func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprint(w, a...) + } +} + +// PrintFunc returns a new function that prints the passed arguments as +// colorized with color.Print(). +func (c *Color) PrintFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Print(a...) + } +} + +// FprintfFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintf(). +func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) { + return func(w io.Writer, format string, a ...interface{}) { + c.Fprintf(w, format, a...) + } +} + +// PrintfFunc returns a new function that prints the passed arguments as +// colorized with color.Printf(). +func (c *Color) PrintfFunc() func(format string, a ...interface{}) { + return func(format string, a ...interface{}) { + c.Printf(format, a...) + } +} + +// FprintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintln(). +func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprintln(w, a...) + } +} + +// PrintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Println(). +func (c *Color) PrintlnFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Println(a...) + } +} + +// SprintFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprint(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output, example: +// +// put := New(FgYellow).SprintFunc() +// fmt.Fprintf(color.Output, "This is a %s", put("warning")) +func (c *Color) SprintFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) + } +} + +// SprintfFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintf(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output. 
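+//
+//	put := New(FgYellow).SprintfFunc()
+//	fmt.Fprintf(color.Output, "This is a %s", put("warning no. %d", 1))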
+func (c *Color) SprintfFunc() func(format string, a ...interface{}) string { + return func(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) + } +} + +// SprintlnFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintln(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output. +func (c *Color) SprintlnFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) + } +} + +// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m" +// an example output might be: "1;36" -> bold cyan +func (c *Color) sequence() string { + format := make([]string, len(c.params)) + for i, v := range c.params { + format[i] = strconv.Itoa(int(v)) + } + + return strings.Join(format, ";") +} + +// wrap wraps the s string with the colors attributes. The string is ready to +// be printed. +func (c *Color) wrap(s string) string { + if c.isNoColorSet() { + return s + } + + return c.format() + s + c.unformat() +} + +func (c *Color) format() string { + return fmt.Sprintf("%s[%sm", escape, c.sequence()) +} + +func (c *Color) unformat() string { + return fmt.Sprintf("%s[%dm", escape, Reset) +} + +// DisableColor disables the color output. Useful to not change any existing +// code and still being able to output. Can be used for flags like +// "--no-color". To enable back use EnableColor() method. +func (c *Color) DisableColor() { + c.noColor = boolPtr(true) +} + +// EnableColor enables the color output. Use it in conjunction with +// DisableColor(). Otherwise this method has no side effects. +func (c *Color) EnableColor() { + c.noColor = boolPtr(false) +} + +func (c *Color) isNoColorSet() bool { + // check first if we have user set action + if c.noColor != nil { + return *c.noColor + } + + // if not return the global option, which is disabled by default + return NoColor +} + +// Equals returns a boolean value indicating whether two colors are equal. +func (c *Color) Equals(c2 *Color) bool { + if len(c.params) != len(c2.params) { + return false + } + + for _, attr := range c.params { + if !c2.attrExists(attr) { + return false + } + } + + return true +} + +func (c *Color) attrExists(a Attribute) bool { + for _, attr := range c.params { + if attr == a { + return true + } + } + + return false +} + +func boolPtr(v bool) *bool { + return &v +} + +func getCachedColor(p Attribute) *Color { + colorsCacheMu.Lock() + defer colorsCacheMu.Unlock() + + c, ok := colorsCache[p] + if !ok { + c = New(p) + colorsCache[p] = c + } + + return c +} + +func colorPrint(format string, p Attribute, a ...interface{}) { + c := getCachedColor(p) + + if !strings.HasSuffix(format, "\n") { + format += "\n" + } + + if len(a) == 0 { + c.Print(format) + } else { + c.Printf(format, a...) + } +} + +func colorString(format string, p Attribute, a ...interface{}) string { + c := getCachedColor(p) + + if len(a) == 0 { + return c.SprintFunc()(format) + } + + return c.SprintfFunc()(format, a...) +} + +// Black is a convenient helper function to print with black foreground. A +// newline is appended to format by default. +func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } + +// Red is a convenient helper function to print with red foreground. A +// newline is appended to format by default. +func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) 
} + +// Green is a convenient helper function to print with green foreground. A +// newline is appended to format by default. +func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } + +// Yellow is a convenient helper function to print with yellow foreground. +// A newline is appended to format by default. +func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) } + +// Blue is a convenient helper function to print with blue foreground. A +// newline is appended to format by default. +func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } + +// Magenta is a convenient helper function to print with magenta foreground. +// A newline is appended to format by default. +func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } + +// Cyan is a convenient helper function to print with cyan foreground. A +// newline is appended to format by default. +func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } + +// White is a convenient helper function to print with white foreground. A +// newline is appended to format by default. +func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } + +// BlackString is a convenient helper function to return a string with black +// foreground. +func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } + +// RedString is a convenient helper function to return a string with red +// foreground. +func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } + +// GreenString is a convenient helper function to return a string with green +// foreground. +func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } + +// YellowString is a convenient helper function to return a string with yellow +// foreground. +func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) } + +// BlueString is a convenient helper function to return a string with blue +// foreground. +func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } + +// MagentaString is a convenient helper function to return a string with magenta +// foreground. +func MagentaString(format string, a ...interface{}) string { + return colorString(format, FgMagenta, a...) +} + +// CyanString is a convenient helper function to return a string with cyan +// foreground. +func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } + +// WhiteString is a convenient helper function to return a string with white +// foreground. +func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } + +// HiBlack is a convenient helper function to print with hi-intensity black foreground. A +// newline is appended to format by default. +func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } + +// HiRed is a convenient helper function to print with hi-intensity red foreground. A +// newline is appended to format by default. +func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) } + +// HiGreen is a convenient helper function to print with hi-intensity green foreground. A +// newline is appended to format by default. +func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) 
}

+// HiYellow is a convenient helper function to print with hi-intensity yellow foreground.
+// A newline is appended to format by default.
+func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) }
+
+// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A
+// newline is appended to format by default.
+func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) }
+
+// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground.
+// A newline is appended to format by default.
+func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) }
+
+// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A
+// newline is appended to format by default.
+func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) }
+
+// HiWhite is a convenient helper function to print with hi-intensity white foreground. A
+// newline is appended to format by default.
+func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) }
+
+// HiBlackString is a convenient helper function to return a string with hi-intensity black
+// foreground.
+func HiBlackString(format string, a ...interface{}) string {
+	return colorString(format, FgHiBlack, a...)
+}
+
+// HiRedString is a convenient helper function to return a string with hi-intensity red
+// foreground.
+func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) }
+
+// HiGreenString is a convenient helper function to return a string with hi-intensity green
+// foreground.
+func HiGreenString(format string, a ...interface{}) string {
+	return colorString(format, FgHiGreen, a...)
+}
+
+// HiYellowString is a convenient helper function to return a string with hi-intensity yellow
+// foreground.
+func HiYellowString(format string, a ...interface{}) string {
+	return colorString(format, FgHiYellow, a...)
+}
+
+// HiBlueString is a convenient helper function to return a string with hi-intensity blue
+// foreground.
+func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) }
+
+// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta
+// foreground.
+func HiMagentaString(format string, a ...interface{}) string {
+	return colorString(format, FgHiMagenta, a...)
+}
+
+// HiCyanString is a convenient helper function to return a string with hi-intensity cyan
+// foreground.
+func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) }
+
+// HiWhiteString is a convenient helper function to return a string with hi-intensity white
+// foreground.
+func HiWhiteString(format string, a ...interface{}) string {
+	return colorString(format, FgHiWhite, a...)
+}
diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go
new file mode 100644
index 00000000000..04541de786f
--- /dev/null
+++ b/vendor/github.com/fatih/color/doc.go
@@ -0,0 +1,135 @@
+/*
+Package color is an ANSI color package to output colorized or SGR defined
+output to the standard output. The API can be used in several ways, pick one
+that suits you.
+
+Use simple and default helper functions with predefined foreground colors:
+
+	color.Cyan("Prints text in cyan.")
+
+	// a newline will be appended automatically
+	color.Blue("Prints %s in blue.", "text")
+
+	// More default foreground colors..
+	color.Red("We have red")
+	color.Yellow("Yellow color too!")
+	color.Magenta("And many others ..")
+
+	// Hi-intensity colors
+	color.HiGreen("Bright green color.")
+	color.HiBlack("Bright black means gray..")
+	color.HiWhite("Shiny white color!")
+
+However, there are times when custom color mixes are required. Below are some
+examples to create custom color objects and use the print functions of each
+separate color object.
+
+	// Create a new color object
+	c := color.New(color.FgCyan).Add(color.Underline)
+	c.Println("Prints cyan text with an underline.")
+
+	// Or just add them to New()
+	d := color.New(color.FgCyan, color.Bold)
+	d.Printf("This prints bold cyan %s\n", "too!")
+
+	// Mix up foreground and background colors, create new mixes!
+	red := color.New(color.FgRed)
+
+	boldRed := red.Add(color.Bold)
+	boldRed.Println("This will print text in bold red.")
+
+	whiteBackground := red.Add(color.BgWhite)
+	whiteBackground.Println("Red text with white background.")
+
+	// Use your own io.Writer output
+	color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
+
+	blue := color.New(color.FgBlue)
+	blue.Fprint(myWriter, "This will print text in blue.")
+
+You can create PrintXxx functions to simplify even more:
+
+	// Create a custom print function for convenience
+	red := color.New(color.FgRed).PrintfFunc()
+	red("warning")
+	red("error: %s", err)
+
+	// Mix up multiple attributes
+	notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
+	notice("don't forget this...")
+
+You can also use FprintXxx functions to pass your own io.Writer:
+
+	blue := color.New(color.FgBlue).FprintfFunc()
+	blue(myWriter, "important notice: %s", stars)
+
+	// Mix up with multiple attributes
+	success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
+	success(myWriter, "don't forget this...")
+
+Or create SprintXxx functions to mix strings with other non-colorized strings:
+
+	yellow := color.New(color.FgYellow).SprintFunc()
+	red := color.New(color.FgRed).SprintFunc()
+
+	fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
+
+	info := color.New(color.FgWhite, color.BgGreen).SprintFunc()
+	fmt.Printf("this %s rocks!\n", info("package"))
+
+Windows support is enabled by default. All Print functions work as intended.
+However, for the color.SprintXXX functions alone, users should use fmt.FprintXXX
+and set the output to color.Output:
+
+	fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
+
+	info := color.New(color.FgWhite, color.BgGreen).SprintFunc()
+	fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
+
+Using this package with existing code is possible. Just use the Set() method to
+set the standard output to the given parameters. That way a rewrite of existing
+code is not required.
+
+	// Use handy standard colors.
+	color.Set(color.FgYellow)
+
+	fmt.Println("Existing text will now be in yellow")
+	fmt.Printf("This one %s\n", "too")
+
+	color.Unset() // don't forget to unset
+
+	// You can mix up parameters
+	color.Set(color.FgMagenta, color.Bold)
+	defer color.Unset() // use it in your function
+
+	fmt.Println("All text will now be bold magenta.")
+
+There might be a case where you want to disable color output (for example to
+pipe the standard output of your app to somewhere else).
`Color` has support to +disable colors both globally and for single color definition. For example +suppose you have a CLI app and a `--no-color` bool flag. You can easily disable +the color output with: + + var flagNoColor = flag.Bool("no-color", false, "Disable color output") + + if *flagNoColor { + color.NoColor = true // disables colorized output + } + +You can also disable the color by setting the NO_COLOR environment variable to any value. + +It also has support for single color definitions (local). You can +disable/enable color output on the fly: + + c := color.New(color.FgCyan) + c.Println("Prints cyan text") + + c.DisableColor() + c.Println("This is printed without any color") + + c.EnableColor() + c.Println("This prints again cyan...") +*/ +package color diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go new file mode 100644 index 00000000000..16686a65523 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go @@ -0,0 +1,62 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/empty/empty.proto + +package empty + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/empty.proto. + +type Empty = emptypb.Empty + +var File_github_com_golang_protobuf_ptypes_empty_empty_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = []byte{ + 0x0a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x3b, 0x65, 0x6d, + 0x70, 0x74, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() } +func file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() { + if File_github_com_golang_protobuf_ptypes_empty_empty_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: 
file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_empty_empty_proto = out.File + file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = nil +} diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore new file mode 100644 index 00000000000..042091d9b3b --- /dev/null +++ b/vendor/github.com/golang/snappy/.gitignore @@ -0,0 +1,16 @@ +cmd/snappytool/snappytool +testdata/bench + +# These explicitly listed benchmark data files are for an obsolete version of +# snappy_test.go. +testdata/alice29.txt +testdata/asyoulik.txt +testdata/fireworks.jpeg +testdata/geo.protodata +testdata/html +testdata/html_x_4 +testdata/kppkn.gtb +testdata/lcet10.txt +testdata/paper-100k.pdf +testdata/plrabn12.txt +testdata/urls.10K diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS new file mode 100644 index 00000000000..52ccb5a934d --- /dev/null +++ b/vendor/github.com/golang/snappy/AUTHORS @@ -0,0 +1,18 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Amazon.com, Inc +Damian Gryski +Eric Buth +Google Inc. +Jan Mercl <0xjnml@gmail.com> +Klaus Post +Rodolfo Carvalho +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS new file mode 100644 index 00000000000..ea6524ddd02 --- /dev/null +++ b/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -0,0 +1,41 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. 
+ +Alex Legg +Damian Gryski +Eric Buth +Jan Mercl <0xjnml@gmail.com> +Jonathan Swinney +Kai Backman +Klaus Post +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE new file mode 100644 index 00000000000..6050c10f4c8 --- /dev/null +++ b/vendor/github.com/golang/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README new file mode 100644 index 00000000000..cea12879a0e --- /dev/null +++ b/vendor/github.com/golang/snappy/README @@ -0,0 +1,107 @@ +The Snappy compression format in the Go programming language. + +To download and install from source: +$ go get github.com/golang/snappy + +Unless otherwise noted, the Snappy-Go source files are distributed +under the BSD-style license found in the LICENSE file. + + + +Benchmarks. + +The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten +or so files, the same set used by the C++ Snappy code (github.com/google/snappy +and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ +3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: + +"go test -test.bench=." 
+ +_UFlat0-8 2.19GB/s ± 0% html +_UFlat1-8 1.41GB/s ± 0% urls +_UFlat2-8 23.5GB/s ± 2% jpg +_UFlat3-8 1.91GB/s ± 0% jpg_200 +_UFlat4-8 14.0GB/s ± 1% pdf +_UFlat5-8 1.97GB/s ± 0% html4 +_UFlat6-8 814MB/s ± 0% txt1 +_UFlat7-8 785MB/s ± 0% txt2 +_UFlat8-8 857MB/s ± 0% txt3 +_UFlat9-8 719MB/s ± 1% txt4 +_UFlat10-8 2.84GB/s ± 0% pb +_UFlat11-8 1.05GB/s ± 0% gaviota + +_ZFlat0-8 1.04GB/s ± 0% html +_ZFlat1-8 534MB/s ± 0% urls +_ZFlat2-8 15.7GB/s ± 1% jpg +_ZFlat3-8 740MB/s ± 3% jpg_200 +_ZFlat4-8 9.20GB/s ± 1% pdf +_ZFlat5-8 991MB/s ± 0% html4 +_ZFlat6-8 379MB/s ± 0% txt1 +_ZFlat7-8 352MB/s ± 0% txt2 +_ZFlat8-8 396MB/s ± 1% txt3 +_ZFlat9-8 327MB/s ± 1% txt4 +_ZFlat10-8 1.33GB/s ± 1% pb +_ZFlat11-8 605MB/s ± 1% gaviota + + + +"go test -test.bench=. -tags=noasm" + +_UFlat0-8 621MB/s ± 2% html +_UFlat1-8 494MB/s ± 1% urls +_UFlat2-8 23.2GB/s ± 1% jpg +_UFlat3-8 1.12GB/s ± 1% jpg_200 +_UFlat4-8 4.35GB/s ± 1% pdf +_UFlat5-8 609MB/s ± 0% html4 +_UFlat6-8 296MB/s ± 0% txt1 +_UFlat7-8 288MB/s ± 0% txt2 +_UFlat8-8 309MB/s ± 1% txt3 +_UFlat9-8 280MB/s ± 1% txt4 +_UFlat10-8 753MB/s ± 0% pb +_UFlat11-8 400MB/s ± 0% gaviota + +_ZFlat0-8 409MB/s ± 1% html +_ZFlat1-8 250MB/s ± 1% urls +_ZFlat2-8 12.3GB/s ± 1% jpg +_ZFlat3-8 132MB/s ± 0% jpg_200 +_ZFlat4-8 2.92GB/s ± 0% pdf +_ZFlat5-8 405MB/s ± 1% html4 +_ZFlat6-8 179MB/s ± 1% txt1 +_ZFlat7-8 170MB/s ± 1% txt2 +_ZFlat8-8 189MB/s ± 1% txt3 +_ZFlat9-8 164MB/s ± 1% txt4 +_ZFlat10-8 479MB/s ± 1% pb +_ZFlat11-8 270MB/s ± 1% gaviota + + + +For comparison (Go's encoded output is byte-for-byte identical to C++'s), here +are the numbers from C++ Snappy's + +make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log + +BM_UFlat/0 2.4GB/s html +BM_UFlat/1 1.4GB/s urls +BM_UFlat/2 21.8GB/s jpg +BM_UFlat/3 1.5GB/s jpg_200 +BM_UFlat/4 13.3GB/s pdf +BM_UFlat/5 2.1GB/s html4 +BM_UFlat/6 1.0GB/s txt1 +BM_UFlat/7 959.4MB/s txt2 +BM_UFlat/8 1.0GB/s txt3 +BM_UFlat/9 864.5MB/s txt4 +BM_UFlat/10 2.9GB/s pb +BM_UFlat/11 1.2GB/s gaviota + +BM_ZFlat/0 944.3MB/s html (22.31 %) +BM_ZFlat/1 501.6MB/s urls (47.78 %) +BM_ZFlat/2 14.3GB/s jpg (99.95 %) +BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) +BM_ZFlat/4 8.3GB/s pdf (83.30 %) +BM_ZFlat/5 903.5MB/s html4 (22.52 %) +BM_ZFlat/6 336.0MB/s txt1 (57.88 %) +BM_ZFlat/7 312.3MB/s txt2 (61.91 %) +BM_ZFlat/8 353.1MB/s txt3 (54.99 %) +BM_ZFlat/9 289.9MB/s txt4 (66.26 %) +BM_ZFlat/10 1.2GB/s pb (19.68 %) +BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go new file mode 100644 index 00000000000..23c6e26c6b9 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode.go @@ -0,0 +1,264 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. 
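+// The length is read from the varint header at the start of src; the
+// compressed payload that follows the header is not examined.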
+func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +func (r *Reader) fill() error { + for r.i >= r.j { + if !r.readFull(r.buf[:4], true) { + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
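+			// The chunk body holds a little-endian uint32 CRC of the decoded
+			// bytes, followed by the compressed data itself.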
+ if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.decoded[:n], false) { + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return r.err + } + } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. +func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil +} diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s new file mode 100644 index 00000000000..e6179f65e35 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.s @@ -0,0 +1,490 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. 
The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. 
+ MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ CX, SI + SUBQ $58, SI + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. 
+	//
+	// We assume that:
+	//	- CX == length && CX > 0
+	//	- DX == offset
+
+	// if offset <= 0 { etc }
+	CMPQ DX, $0
+	JLE  errCorrupt
+
+	// if d < offset { etc }
+	MOVQ DI, BX
+	SUBQ R8, BX
+	CMPQ BX, DX
+	JLT  errCorrupt
+
+	// if length > len(dst)-d { etc }
+	MOVQ R10, BX
+	SUBQ DI, BX
+	CMPQ CX, BX
+	JGT  errCorrupt
+
+	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+	//
+	// Set:
+	//	- R14 = len(dst)-d
+	//	- R15 = &dst[d-offset]
+	MOVQ R10, R14
+	SUBQ DI, R14
+	MOVQ DI, R15
+	SUBQ DX, R15
+
+	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+	//
+	// First, try using two 8-byte load/stores, similar to the doLit technique
+	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+	// and not one 16-byte load/store, and the first store has to be before the
+	// second load, due to the overlap if offset is in the range [8, 16).
+	//
+	// if length > 16 || offset < 8 || len(dst)-d < 16 {
+	//	goto slowForwardCopy
+	// }
+	// copy 16 bytes
+	// d += length
+	CMPQ CX, $16
+	JGT  slowForwardCopy
+	CMPQ DX, $8
+	JLT  slowForwardCopy
+	CMPQ R14, $16
+	JLT  slowForwardCopy
+	MOVQ 0(R15), AX
+	MOVQ AX, 0(DI)
+	MOVQ 8(R15), BX
+	MOVQ BX, 8(DI)
+	ADDQ CX, DI
+	JMP  loop
+
+slowForwardCopy:
+	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+	// can still try 8-byte load stores, provided we can overrun up to 10 extra
+	// bytes. As above, the overrun will be fixed up by subsequent iterations
+	// of the outermost loop.
+	//
+	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+	// commentary says:
+	//
+	// ----
+	//
+	// The main part of this loop is a simple copy of eight bytes at a time
+	// until we've copied (at least) the requested amount of bytes. However,
+	// if d and d-offset are less than eight bytes apart (indicating a
+	// repeating pattern of length < 8), we first need to expand the pattern in
+	// order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+	//
+	//	abxxxxxxxxxxxx
+	//	[------]           d-offset
+	//	  [------]         d
+	//
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+	//
+	//	ababxxxxxxxxxx
+	//	[------]           d-offset
+	//	    [------]       d
+	//
+	// and repeat the exercise until the two no longer overlap.
+	//
+	// This allows us to do very well in the special case of one single byte
+	// repeated many times, without taking a big hit for more general cases.
+	//
+	// The worst case of extra writing past the end of the match occurs when
+	// offset == 1 and length == 1; the last copy will read from byte positions
+	// [0..7] and write to [4..11], whereas it was only supposed to write to
+	// position 1. Thus, ten excess bytes.
+	//
+	// ----
+	//
+	// That "10 byte overrun" worst case is confirmed by Go's
+	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+	// and finishSlowForwardCopy algorithm.
+	//
+	// if length > len(dst)-d-10 {
+	//	goto verySlowForwardCopy
+	// }
+	SUBQ $10, R14
+	CMPQ CX, R14
+	JGT  verySlowForwardCopy
+
+makeOffsetAtLeast8:
+	// !!! As above, expand the pattern so that offset >= 8 and we can use
+	// 8-byte load/stores.
+ // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/decode_arm64.s b/vendor/github.com/golang/snappy/decode_arm64.s new file mode 100644 index 00000000000..7a3ead17eac --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_arm64.s @@ -0,0 +1,494 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - R2 scratch +// - R3 scratch +// - R4 length or x +// - R5 offset +// - R6 &src[s] +// - R7 &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly R7 - R8, and len(dst)-d is R10 - R7. +// The s variable is implicitly R6 - R11, and len(src)-s is R13 - R6. +TEXT ·decode(SB), NOSPLIT, $56-56 + // Initialize R6, R7 and R8-R13. 
+ MOVD dst_base+0(FP), R8 + MOVD dst_len+8(FP), R9 + MOVD R8, R7 + MOVD R8, R10 + ADD R9, R10, R10 + MOVD src_base+24(FP), R11 + MOVD src_len+32(FP), R12 + MOVD R11, R6 + MOVD R11, R13 + ADD R12, R13, R13 + +loop: + // for s < len(src) + CMP R13, R6 + BEQ end + + // R4 = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBU (R6), R4 + MOVW R4, R3 + ANDW $3, R3 + MOVW $1, R1 + CMPW R1, R3 + BGE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + MOVW $60, R1 + LSRW $2, R4, R4 + CMPW R4, R1 + BLS tagLit60Plus + + // case x < 60: + // s++ + ADD $1, R6, R6 + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that R4 == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // R4 can hold 64 bits, so the increment cannot overflow. + ADD $1, R4, R4 + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // R2 = len(dst) - d + // R3 = len(src) - s + MOVD R10, R2 + SUB R7, R2, R2 + MOVD R13, R3 + SUB R6, R3, R3 + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMP $16, R4 + BGT callMemmove + CMP $16, R2 + BLT callMemmove + CMP $16, R3 + BLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + LDP 0(R6), (R14, R15) + STP (R14, R15), 0(R7) + + // d += length + // s += length + ADD R4, R7, R7 + ADD R4, R6, R6 + B loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMP R2, R4 + BGT errCorrupt + CMP R3, R4 + BGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // R7, R6 and R4 as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVD R7, 8(RSP) + MOVD R6, 16(RSP) + MOVD R4, 24(RSP) + MOVD R7, 32(RSP) + MOVD R6, 40(RSP) + MOVD R4, 48(RSP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. 
+ MOVD 32(RSP), R7 + MOVD 40(RSP), R6 + MOVD 48(RSP), R4 + MOVD dst_base+0(FP), R8 + MOVD dst_len+8(FP), R9 + MOVD R8, R10 + ADD R9, R10, R10 + MOVD src_base+24(FP), R11 + MOVD src_len+32(FP), R12 + MOVD R11, R13 + ADD R12, R13, R13 + + // d += length + // s += length + ADD R4, R7, R7 + ADD R4, R6, R6 + B loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADD R4, R6, R6 + SUB $58, R6, R6 + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // case x == 60: + MOVW $61, R1 + CMPW R1, R4 + BEQ tagLit61 + BGT tagLit62Plus + + // x = uint32(src[s-1]) + MOVBU -1(R6), R4 + B doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVHU -2(R6), R4 + B doLit + +tagLit62Plus: + CMPW $62, R4 + BHI tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVHU -3(R6), R4 + MOVBU -1(R6), R3 + ORR R3<<16, R4 + B doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVWU -4(R6), R4 + B doLit + + // The code above handles literal tags. + // ---------------------------------------- + // The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADD $5, R6, R6 + + // if uint(s) > uint(len(src)) { etc } + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // length = 1 + int(src[s-5])>>2 + MOVD $1, R1 + ADD R4>>2, R1, R4 + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVWU -4(R6), R5 + B doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADD $3, R6, R6 + + // if uint(s) > uint(len(src)) { etc } + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // length = 1 + int(src[s-3])>>2 + MOVD $1, R1 + ADD R4>>2, R1, R4 + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVHU -2(R6), R5 + B doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - R3 == src[s] & 0x03 + // - R4 == src[s] + CMP $2, R3 + BEQ tagCopy2 + BGT tagCopy4 + + // case tagCopy1: + // s += 2 + ADD $2, R6, R6 + + // if uint(s) > uint(len(src)) { etc } + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVD R4, R5 + AND $0xe0, R5 + MOVBU -1(R6), R3 + ORR R5<<3, R3, R5 + + // length = 4 + int(src[s-2])>>2&0x7 + MOVD $7, R1 + AND R4>>2, R1, R4 + ADD $4, R4, R4 + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - R4 == length && R4 > 0 + // - R5 == offset + + // if offset <= 0 { etc } + MOVD $0, R1 + CMP R1, R5 + BLE errCorrupt + + // if d < offset { etc } + MOVD R7, R3 + SUB R8, R3, R3 + CMP R5, R3 + BLT errCorrupt + + // if length > len(dst)-d { etc } + MOVD R10, R3 + SUB R7, R3, R3 + CMP R3, R4 + BGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVD R10, R14 + SUB R7, R14, R14 + MOVD R7, R15 + SUB R5, R15, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. 
Note that this has to be two 8-byte load/stores
+ // and not one 16-byte load/store, and the first store has to be before the
+ // second load, due to the overlap if offset is in the range [8, 16).
+ //
+ // if length > 16 || offset < 8 || len(dst)-d < 16 {
+ //   goto slowForwardCopy
+ // }
+ // copy 16 bytes
+ // d += length
+ CMP $16, R4
+ BGT slowForwardCopy
+ CMP $8, R5
+ BLT slowForwardCopy
+ CMP $16, R14
+ BLT slowForwardCopy
+ MOVD 0(R15), R2
+ MOVD R2, 0(R7)
+ MOVD 8(R15), R3
+ MOVD R3, 8(R7)
+ ADD R4, R7, R7
+ B loop
+
+slowForwardCopy:
+ // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+ // can still try 8-byte load/stores, provided we can overrun up to 10 extra
+ // bytes. As above, the overrun will be fixed up by subsequent iterations
+ // of the outermost loop.
+ //
+ // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+ // commentary says:
+ //
+ // ----
+ //
+ // The main part of this loop is a simple copy of eight bytes at a time
+ // until we've copied (at least) the requested amount of bytes. However,
+ // if d and d-offset are less than eight bytes apart (indicating a
+ // repeating pattern of length < 8), we first need to expand the pattern in
+ // order to get the correct results. For instance, if the buffer looks like
+ // this, with the eight-byte <d-offset> and <d> patterns marked as
+ // intervals:
+ //
+ //   abxxxxxxxxxxxx
+ //   [------]           d-offset
+ //     [------]         d
+ //
+ // a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+ // once, after which we can move <d> two bytes without moving <d-offset>:
+ //
+ //   ababxxxxxxxxxx
+ //   [------]           d-offset
+ //       [------]       d
+ //
+ // and repeat the exercise until the two no longer overlap.
+ //
+ // This allows us to do very well in the special case of one single byte
+ // repeated many times, without taking a big hit for more general cases.
+ //
+ // The worst case of extra writing past the end of the match occurs when
+ // offset == 1 and length == 1; the last copy will read from byte positions
+ // [0..7] and write to [4..11], whereas it was only supposed to write to
+ // position 1. Thus, ten excess bytes.
+ //
+ // ----
+ //
+ // That "10 byte overrun" worst case is confirmed by Go's
+ // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+ // and finishSlowForwardCopy algorithm.
+ //
+ // if length > len(dst)-d-10 {
+ //   goto verySlowForwardCopy
+ // }
+ SUB $10, R14, R14
+ CMP R14, R4
+ BGT verySlowForwardCopy
+
+makeOffsetAtLeast8:
+ // !!! As above, expand the pattern so that offset >= 8 and we can use
+ // 8-byte load/stores.
+ //
+ // for offset < 8 {
+ //   copy 8 bytes from dst[d-offset:] to dst[d:]
+ //   length -= offset
+ //   d += offset
+ //   offset += offset
+ //   // The two previous lines together mean that d-offset, and therefore
+ //   // R15, is unchanged.
+ // }
+ CMP $8, R5
+ BGE fixUpSlowForwardCopy
+ MOVD (R15), R3
+ MOVD R3, (R7)
+ SUB R5, R4, R4
+ ADD R5, R7, R7
+ ADD R5, R5, R5
+ B makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+ // !!! Add length (which might be negative now) to d (implied by R7 being
+ // &dst[d]) so that d ends up at the right place when we jump back to the
+ // top of the loop. Before we do that, though, we save R7 to R2 so that, if
+ // length is positive, copying the remaining length bytes will write to the
+ // right place.
+ MOVD R7, R2
+ ADD R4, R7, R7
+
+finishSlowForwardCopy:
+ // !!! Repeat 8-byte load/stores until length <= 0.
Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + MOVD $0, R1 + CMP R1, R4 + BLE loop + MOVD (R15), R3 + MOVD R3, (R2) + ADD $8, R15, R15 + ADD $8, R2, R2 + SUB $8, R4, R4 + B finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), R3 + MOVB R3, (R7) + ADD $1, R15, R15 + ADD $1, R7, R7 + SUB $1, R4, R4 + CBNZ R4, verySlowForwardCopy + B loop + + // The code above handles copy tags. + // ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMP R10, R7 + BNE errCorrupt + + // return 0 + MOVD $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVD $1, R2 + MOVD R2, ret+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/decode_asm.go b/vendor/github.com/golang/snappy/decode_asm.go new file mode 100644 index 00000000000..7082b349199 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_asm.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm +// +build amd64 arm64 + +package snappy + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go new file mode 100644 index 00000000000..2f672be5574 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_other.go @@ -0,0 +1,115 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64,!arm64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset >= length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go new file mode 100644 index 00000000000..7f23657076c --- /dev/null +++ b/vendor/github.com/golang/snappy/encode.go @@ -0,0 +1,289 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. 
On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. 
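+//
+// A minimal usage sketch of the recommended replacement (illustrative only,
+// not from the upstream documentation; dst and data are placeholders):
+//
+//	w := snappy.NewBufferedWriter(dst) // dst is any io.Writer
+//	if _, err := w.Write(data); err != nil {
+//		// handle the error
+//	}
+//	if err := w.Close(); err != nil { // Close also flushes
+//		// handle the error
+//	}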
+func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. 
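+ // (Concretely: the chunk stays compressed only when len(compressed) <
+ // len(uncompressed)-len(uncompressed)/8 below, i.e. only when the
+ // compression saves more than len(uncompressed)/8 bytes; otherwise the
+ // uncompressed bytes are written instead.)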
+ compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s new file mode 100644 index 00000000000..adfd979fe27 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. 
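+//
+// (In the TEXT directive below, $24-56 declares those 24 bytes of local
+// frame and 56 bytes of arguments and results: two 24-byte slice headers
+// plus the 8-byte int return value.)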
+TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. 
The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. 
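+ // (The 56, 64 and 88 offsets below are the spill slots listed for CX, DX
+ // and R9 in the register allocation table above.)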
+ MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. + // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. 
This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. 
+ + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. + MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s new file mode 100644 index 00000000000..f8d54adfc5c --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_arm64.s @@ -0,0 +1,722 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - R3 len(lit) +// - R4 n +// - R6 return value +// - R8 &dst[i] +// - R10 &lit[0] +// +// The 32 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. 
+TEXT ·emitLiteral(SB), NOSPLIT, $32-56 + MOVD dst_base+0(FP), R8 + MOVD lit_base+24(FP), R10 + MOVD lit_len+32(FP), R3 + MOVD R3, R6 + MOVW R3, R4 + SUBW $1, R4, R4 + + CMPW $60, R4 + BLT oneByte + CMPW $256, R4 + BLT twoBytes + +threeBytes: + MOVD $0xf4, R2 + MOVB R2, 0(R8) + MOVW R4, 1(R8) + ADD $3, R8, R8 + ADD $3, R6, R6 + B memmove + +twoBytes: + MOVD $0xf0, R2 + MOVB R2, 0(R8) + MOVB R4, 1(R8) + ADD $2, R8, R8 + ADD $2, R6, R6 + B memmove + +oneByte: + LSLW $2, R4, R4 + MOVB R4, 0(R8) + ADD $1, R8, R8 + ADD $1, R6, R6 + +memmove: + MOVD R6, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // R8, R10 and R3 as arguments. + MOVD R8, 8(RSP) + MOVD R10, 16(RSP) + MOVD R3, 24(RSP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - R3 length +// - R7 &dst[0] +// - R8 &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVD dst_base+0(FP), R8 + MOVD R8, R7 + MOVD offset+24(FP), R11 + MOVD length+32(FP), R3 + +loop0: + // for length >= 68 { etc } + CMPW $68, R3 + BLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVD $0xfe, R2 + MOVB R2, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUB $64, R3, R3 + B loop0 + +step1: + // if length > 64 { etc } + CMP $64, R3 + BLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVD $0xee, R2 + MOVB R2, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUB $60, R3, R3 + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMP $12, R3 + BGE step3 + CMPW $2048, R11 + BGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(R8) + LSRW $3, R11, R11 + AND $0xe0, R11, R11 + SUB $4, R3, R3 + LSLW $2, R3 + AND $0xff, R3, R3 + ORRW R3, R11, R11 + ORRW $1, R11, R11 + MOVB R11, 0(R8) + ADD $2, R8, R8 + + // Return the number of bytes written. + SUB R7, R8, R8 + MOVD R8, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUB $1, R3, R3 + AND $0xff, R3, R3 + LSLW $2, R3, R3 + ORRW $2, R3, R3 + MOVB R3, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + + // Return the number of bytes written. + SUB R7, R8, R8 + MOVD R8, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - R6 &src[0] +// - R7 &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVD src_base+0(FP), R6 + MOVD src_len+8(FP), R14 + MOVD i+24(FP), R15 + MOVD j+32(FP), R7 + ADD R6, R14, R14 + ADD R6, R15, R15 + ADD R6, R7, R7 + MOVD R14, R13 + SUB $8, R13, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. 
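+ // (R13 was set above to &src[len(src)-8], so staying at or below it
+ // guarantees that the 8-byte loads in this loop cannot read past the end
+ // of src.)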
+ CMP R13, R7
+ BHI cmp1
+ MOVD (R15), R3
+ MOVD (R7), R4
+ CMP R4, R3
+ BNE bsf
+ ADD $8, R15, R15
+ ADD $8, R7, R7
+ B cmp8
+
+bsf:
+ // If those 8 bytes were not equal, XOR the two 8 byte values, and return
+ // the index of the first byte that differs.
+ // RBIT reverses the bit order, then CLZ counts the leading zeros, the
+ // combination of which finds the least significant bit which is set.
+ // The arm64 architecture is little-endian, and the shift by 3 converts
+ // a bit index to a byte index.
+ EOR R3, R4, R4
+ RBIT R4, R4
+ CLZ R4, R4
+ ADD R4>>3, R7, R7
+
+ // Convert from &src[ret] to ret.
+ SUB R6, R7, R7
+ MOVD R7, ret+40(FP)
+ RET
+
+cmp1:
+ // In src's tail, compare 1 byte at a time.
+ CMP R7, R14
+ BLS extendMatchEnd
+ MOVB (R15), R3
+ MOVB (R7), R4
+ CMP R4, R3
+ BNE extendMatchEnd
+ ADD $1, R15, R15
+ ADD $1, R7, R7
+ B cmp1
+
+extendMatchEnd:
+ // Convert from &src[ret] to ret.
+ SUB R6, R7, R7
+ MOVD R7, ret+40(FP)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func encodeBlock(dst, src []byte) (d int)
+//
+// All local variables fit into registers, other than "var table". The register
+// allocation:
+// - R3 . .
+// - R4 . .
+// - R5 64 shift
+// - R6 72 &src[0], tableSize
+// - R7 80 &src[s]
+// - R8 88 &dst[d]
+// - R9 96 sLimit
+// - R10 . &src[nextEmit]
+// - R11 104 prevHash, currHash, nextHash, offset
+// - R12 112 &src[base], skip
+// - R13 . &src[nextS], &src[len(src) - 8]
+// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
+// - R15 120 candidate
+// - R16 . hash constant, 0x1e35a7bd
+// - R17 . &table
+// - . 128 table
+//
+// The second column (64, 72, etc) is the stack offset to spill the registers
+// when calling other functions. We could pack this slightly tighter, but it's
+// simpler to have a dedicated spill map independent of the function called.
+//
+// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
+// extra 64 bytes, to call other functions, and an extra 64 bytes, to spill
+// local variables (registers) during calls gives 32768 + 64 + 64 = 32896.
+TEXT ·encodeBlock(SB), 0, $32896-56
+ MOVD dst_base+0(FP), R8
+ MOVD src_base+24(FP), R7
+ MOVD src_len+32(FP), R14
+
+ // shift, tableSize := uint32(32-8), 1<<8
+ MOVD $24, R5
+ MOVD $256, R6
+ MOVW $0xa7bd, R16
+ MOVKW $(0x1e35<<16), R16
+
+calcShift:
+ // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+ //   shift--
+ // }
+ MOVD $16384, R2
+ CMP R2, R6
+ BGE varTable
+ CMP R14, R6
+ BGE varTable
+ SUB $1, R5, R5
+ LSL $1, R6, R6
+ B calcShift
+
+varTable:
+ // var table [maxTableSize]uint16
+ //
+ // In the asm code, unlike the Go code, we can zero-initialize only the
+ // first tableSize elements. Each uint16 element is 2 bytes and each
+ // iteration writes 64 bytes, so we can do only tableSize/32 writes
+ // instead of the 2048 writes that would zero-initialize all of table's
+ // 32768 bytes. This clear could overrun the first tableSize elements, but
+ // it won't overrun the allocated stack size.
+ ADD $128, RSP, R17
+ MOVD R17, R4
+
+ // !!! R6 = &table[tableSize]
+ ADD R6<<1, R17, R6
+
+memclr:
+ STP.P (ZR, ZR), 64(R4)
+ STP (ZR, ZR), -48(R4)
+ STP (ZR, ZR), -32(R4)
+ STP (ZR, ZR), -16(R4)
+ CMP R4, R6
+ BHI memclr
+
+ // !!! R6 = &src[0]
+ MOVD R7, R6
+
+ // sLimit := len(src) - inputMargin
+ MOVD R14, R9
+ SUB $15, R9, R9
+
+ // !!! Pre-emptively spill R5, R6 and R9 to the stack. Their values don't
+ // change for the rest of the function.
+ MOVD R5, 64(RSP) + MOVD R6, 72(RSP) + MOVD R9, 96(RSP) + + // nextEmit := 0 + MOVD R6, R10 + + // s := 1 + ADD $1, R7, R7 + + // nextHash := hash(load32(src, s), shift) + MOVW 0(R7), R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + +outer: + // for { etc } + + // skip := 32 + MOVD $32, R12 + + // nextS := s + MOVD R7, R13 + + // candidate := 0 + MOVD $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVD R13, R7 + + // bytesBetweenHashLookups := skip >> 5 + MOVD R12, R14 + LSR $5, R14, R14 + + // nextS = s + bytesBetweenHashLookups + ADD R14, R13, R13 + + // skip += bytesBetweenHashLookups + ADD R14, R12, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVD R13, R3 + SUB R6, R3, R3 + CMP R9, R3 + BHI emitRemainder + + // candidate = int(table[nextHash]) + MOVHU 0(R17)(R11<<1), R15 + + // table[nextHash] = uint16(s) + MOVD R7, R3 + SUB R6, R3, R3 + + MOVH R3, 0(R17)(R11<<1) + + // nextHash = hash(load32(src, nextS), shift) + MOVW 0(R13), R11 + MULW R16, R11 + LSRW R5, R11, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVW 0(R7), R3 + MOVW (R6)(R15), R4 + CMPW R4, R3 + BNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVD R7, R3 + SUB R10, R3, R3 + CMP $16, R3 + BLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. + // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVW R3, R4 + SUBW $1, R4, R4 + + MOVW $60, R2 + CMPW R2, R4 + BLT inlineEmitLiteralOneByte + MOVW $256, R2 + CMPW R2, R4 + BLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVD $0xf4, R1 + MOVB R1, 0(R8) + MOVW R4, 1(R8) + ADD $3, R8, R8 + B inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVD $0xf0, R1 + MOVB R1, 0(R8) + MOVB R4, 1(R8) + ADD $2, R8, R8 + B inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + LSLW $2, R4, R4 + MOVB R4, 0(R8) + ADD $1, R8, R8 + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // R8, R10 and R3 as arguments. + MOVD R8, 8(RSP) + MOVD R10, 16(RSP) + MOVD R3, 24(RSP) + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADD R3, R8, R8 + MOVD R7, 80(RSP) + MOVD R8, 88(RSP) + MOVD R15, 120(RSP) + CALL runtime·memmove(SB) + MOVD 64(RSP), R5 + MOVD 72(RSP), R6 + MOVD 80(RSP), R7 + MOVD 88(RSP), R8 + MOVD 96(RSP), R9 + MOVD 120(RSP), R15 + ADD $128, RSP, R17 + MOVW $0xa7bd, R16 + MOVKW $(0x1e35<<16), R16 + B inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB R3, R4 + SUBW $1, R4, R4 + AND $0xff, R4, R4 + LSLW $2, R4, R4 + MOVB R4, (R8) + ADD $1, R8, R8 + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. 
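+ // (LDP and STP move a pair of 8-byte registers, so the two instructions
+ // below copy 16 bytes with one unaligned load and one unaligned store.)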
+ LDP 0(R10), (R0, R1) + STP (R0, R1), 0(R8) + ADD R3, R8, R8 + +inner1: + // for { etc } + + // base := s + MOVD R7, R12 + + // !!! offset := base - candidate + MOVD R12, R11 + SUB R15, R11, R11 + SUB R6, R11, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVD src_len+32(FP), R14 + ADD R6, R14, R14 + + // !!! R13 = &src[len(src) - 8] + MOVD R14, R13 + SUB $8, R13, R13 + + // !!! R15 = &src[candidate + 4] + ADD $4, R15, R15 + ADD R6, R15, R15 + + // !!! s += 4 + ADD $4, R7, R7 + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMP R13, R7 + BHI inlineExtendMatchCmp1 + MOVD (R15), R3 + MOVD (R7), R4 + CMP R4, R3 + BNE inlineExtendMatchBSF + ADD $8, R15, R15 + ADD $8, R7, R7 + B inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. + // RBIT reverses the bit order, then CLZ counts the leading zeros, the + // combination of which finds the least significant bit which is set. + // The arm64 architecture is little-endian, and the shift by 3 converts + // a bit index to a byte index. + EOR R3, R4, R4 + RBIT R4, R4 + CLZ R4, R4 + ADD R4>>3, R7, R7 + B inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMP R7, R14 + BLS inlineExtendMatchEnd + MOVB (R15), R3 + MOVB (R7), R4 + CMP R4, R3 + BNE inlineExtendMatchEnd + ADD $1, R15, R15 + ADD $1, R7, R7 + B inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVD R7, R3 + SUB R12, R3, R3 + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + MOVW $68, R2 + CMPW R2, R3 + BLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVD $0xfe, R1 + MOVB R1, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUBW $64, R3, R3 + B inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + MOVW $64, R2 + CMPW R2, R3 + BLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVD $0xee, R1 + MOVB R1, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUBW $60, R3, R3 + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + MOVW $12, R2 + CMPW R2, R3 + BGE inlineEmitCopyStep3 + MOVW $2048, R2 + CMPW R2, R11 + BGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(R8) + LSRW $8, R11, R11 + LSLW $5, R11, R11 + SUBW $4, R3, R3 + AND $0xff, R3, R3 + LSLW $2, R3, R3 + ORRW R3, R11, R11 + ORRW $1, R11, R11 + MOVB R11, 0(R8) + ADD $2, R8, R8 + B inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBW $1, R3, R3 + LSLW $2, R3, R3 + ORRW $2, R3, R3 + MOVB R3, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVD R7, R10 + + // if s >= sLimit { goto emitRemainder } + MOVD R7, R3 + SUB R6, R3, R3 + CMP R3, R9 + BLS emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. 
+ + // x := load64(src, s-1) + MOVD -1(R7), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // table[prevHash] = uint16(s-1) + MOVD R7, R3 + SUB R6, R3, R3 + SUB $1, R3, R3 + + MOVHU R3, 0(R17)(R11<<1) + + // currHash := hash(uint32(x>>8), shift) + LSR $8, R14, R14 + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // candidate = int(table[currHash]) + MOVHU 0(R17)(R11<<1), R15 + + // table[currHash] = uint16(s) + ADD $1, R3, R3 + MOVHU R3, 0(R17)(R11<<1) + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVW (R6)(R15), R4 + CMPW R4, R14 + BEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + LSR $8, R14, R14 + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // s++ + ADD $1, R7, R7 + + // break out of the inner1 for loop, i.e. continue the outer loop. + B outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVD src_len+32(FP), R3 + ADD R6, R3, R3 + CMP R3, R10 + BEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVD R8, 8(RSP) + MOVD $0, 16(RSP) // Unnecessary, as the callee ignores it, but conservative. + MOVD $0, 24(RSP) // Unnecessary, as the callee ignores it, but conservative. + MOVD R10, 32(RSP) + SUB R10, R3, R3 + MOVD R3, 40(RSP) + MOVD R3, 48(RSP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. + MOVD R8, 88(RSP) + CALL ·emitLiteral(SB) + MOVD 88(RSP), R8 + + // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVD 56(RSP), R1 + ADD R1, R8, R8 + +encodeBlockEnd: + MOVD dst_base+0(FP), R3 + SUB R3, R8, R8 + MOVD R8, d+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/encode_asm.go b/vendor/github.com/golang/snappy/encode_asm.go new file mode 100644 index 00000000000..107c1e71418 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_asm.go @@ -0,0 +1,30 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm +// +build amd64 arm64 + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. +// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. +// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go new file mode 100644 index 00000000000..296d7f0beb0 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_other.go @@ -0,0 +1,238 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64,!arm64 appengine !gc noasm + +package snappy + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. 
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+ b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+ i, n := 0, uint(len(lit)-1)
+ switch {
+ case n < 60:
+ dst[0] = uint8(n)<<2 | tagLiteral
+ i = 1
+ case n < 1<<8:
+ dst[0] = 60<<2 | tagLiteral
+ dst[1] = uint8(n)
+ i = 2
+ default:
+ dst[0] = 61<<2 | tagLiteral
+ dst[1] = uint8(n)
+ dst[2] = uint8(n >> 8)
+ i = 3
+ }
+ return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= 65535
+// 4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+ i := 0
+ // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+ // threshold for this loop is a little higher (at 68 = 64 + 4), and the
+ // length emitted down below is a little lower (at 60 = 64 - 4), because
+ // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+ // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+ // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+ // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+ // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+ // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+ for length >= 68 {
+ // Emit a length 64 copy, encoded as 3 bytes.
+ dst[i+0] = 63<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= 64
+ }
+ if length > 64 {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ dst[i+0] = 59<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= 60
+ }
+ if length >= 12 || offset >= 2048 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[i+0] = uint8(length-1)<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ return i + 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ dst[i+1] = uint8(offset)
+ return i + 2
+}
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+// 0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+ for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+ }
+ return j
+}
+
+func hash(u, shift uint32) uint32 {
+ return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+ // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+	// The table element type is uint16, as s < sLimit and sLimit < len(src)
+	// and len(src) <= maxBlockSize and maxBlockSize == 65536.
+	const (
+		maxTableSize = 1 << 14
+		// tableMask is redundant, but helps the compiler eliminate bounds
+		// checks.
+		tableMask = maxTableSize - 1
+	)
+	shift := uint32(32 - 8)
+	for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+		shift--
+	}
+	// In Go, all array elements are zero-initialized, so there is no advantage
+	// to a smaller tableSize per se. However, it matches the C++ algorithm,
+	// and in the asm versions of this code, we can get away with zeroing only
+	// the first tableSize elements.
+	var table [maxTableSize]uint16
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := len(src) - inputMargin
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := 0
+
+	// The encoded form must start with a literal, as there are no previous
+	// bytes to copy, so we start looking for hash matches at s == 1.
+	s := 1
+	nextHash := hash(load32(src, s), shift)
+
+	for {
+		// Copied from the C++ snappy implementation:
+		//
+		// Heuristic match skipping: If 32 bytes are scanned with no matches
+		// found, start looking only at every other byte. If 32 more bytes are
+		// scanned (or skipped), look at every third byte, etc. When a match
+		// is found, immediately go back to looking at every byte. This is a
+		// small loss (~5% performance, ~0.1% density) for compressible data
+		// due to more bookkeeping, but for non-compressible data (such as
+		// JPEG) it's a huge win since the compressor quickly "realizes" the
+		// data is incompressible and doesn't bother looking for matches
+		// everywhere.
+		//
+		// The "skip" variable keeps track of how many bytes there are since
+		// the last match; dividing it by 32 (i.e. right-shifting by five) gives
+		// the number of bytes to move ahead for each iteration.
+		skip := 32
+
+		nextS := s
+		candidate := 0
+		for {
+			s = nextS
+			bytesBetweenHashLookups := skip >> 5
+			nextS = s + bytesBetweenHashLookups
+			skip += bytesBetweenHashLookups
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			candidate = int(table[nextHash&tableMask])
+			table[nextHash&tableMask] = uint16(s)
+			nextHash = hash(load32(src, nextS), shift)
+			if load32(src, s) == load32(src, candidate) {
+				break
+			}
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+		d += emitLiteral(dst[d:], src[nextEmit:s])
+
+		// Call emitCopy, and then see if another emitCopy could be our next
+		// move. Repeat until we find no match for the input immediately after
+		// what was consumed by the last emitCopy call.
+		//
+		// If we exit this loop normally then we need to call emitLiteral next,
+		// though we don't yet know how big the literal will be. We handle that
+		// by proceeding to the next iteration of the main loop. We also can
+		// exit this loop via goto if we get close to exhausting the input.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+			base := s
+
+			// Extend the 4-byte match as long as possible.
+ // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go new file mode 100644 index 00000000000..ece692ea461 --- /dev/null +++ b/vendor/github.com/golang/snappy/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snappy // import "github.com/golang/snappy" + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. 
The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE b/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go
new file mode 100644
index 00000000000..0ba9da7d6bf
--- /dev/null
+++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go
@@ -0,0 +1,168 @@
+// Copyright 2022 Google LLC.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package client is a cross-platform client for the signer binary (a.k.a. "EnterpriseCertSigner").
+//
+// The signer binary is OS-specific, but exposes a standard set of APIs for the client to use.
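+//
+// A minimal usage sketch, assuming digest already holds a SHA-256 hash
+// computed by the caller (illustrative only; error handling elided):
+//
+//	key, err := client.Cred("") // "" falls back to the default config file path.
+//	if err != nil {
+//		// handle error
+//	}
+//	defer key.Close()
+//	sig, err := key.Sign(nil, digest, crypto.SHA256)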
+package client + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "encoding/gob" + "fmt" + "io" + "net/rpc" + "os" + "os/exec" + + "github.com/googleapis/enterprise-certificate-proxy/client/util" +) + +const signAPI = "EnterpriseCertSigner.Sign" +const certificateChainAPI = "EnterpriseCertSigner.CertificateChain" +const publicKeyAPI = "EnterpriseCertSigner.Public" + +// A Connection wraps a pair of unidirectional streams as an io.ReadWriteCloser. +type Connection struct { + io.ReadCloser + io.WriteCloser +} + +// Close closes c's underlying ReadCloser and WriteCloser. +func (c *Connection) Close() error { + rerr := c.ReadCloser.Close() + werr := c.WriteCloser.Close() + if rerr != nil { + return rerr + } + return werr +} + +func init() { + gob.Register(crypto.SHA256) + gob.Register(&rsa.PSSOptions{}) +} + +// SignArgs contains arguments to a crypto Signer.Sign method. +type SignArgs struct { + Digest []byte // The content to sign. + Opts crypto.SignerOpts // Options for signing, such as Hash identifier. +} + +// Key implements credential.Credential by holding the executed signer subprocess. +type Key struct { + cmd *exec.Cmd // Pointer to the signer subprocess. + client *rpc.Client // Pointer to the rpc client that communicates with the signer subprocess. + publicKey crypto.PublicKey // Public key of loaded certificate. + chain [][]byte // Certificate chain of loaded certificate. +} + +// CertificateChain returns the credential as a raw X509 cert chain. This contains the public key. +func (k *Key) CertificateChain() [][]byte { + return k.chain +} + +// Close closes the RPC connection and kills the signer subprocess. +// Call this to free up resources when the Key object is no longer needed. +func (k *Key) Close() error { + if err := k.cmd.Process.Kill(); err != nil { + return fmt.Errorf("failed to kill signer process: %w", err) + } + if err := k.cmd.Wait(); err.Error() != "signal: killed" { + return fmt.Errorf("signer process was not killed: %w", err) + } + // The Pipes connecting the RPC client should have been closed when the signer subprocess was killed. + // Calling `k.client.Close()` before `k.cmd.Process.Kill()` or `k.cmd.Wait()` _will_ cause a segfault. + if err := k.client.Close(); err.Error() != "close |0: file already closed" { + return fmt.Errorf("failed to close RPC connection: %w", err) + } + return nil +} + +// Public returns the public key for this Key. +func (k *Key) Public() crypto.PublicKey { + return k.publicKey +} + +// Sign signs a message digest, using the specified signer options. +func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed []byte, err error) { + if opts != nil && opts.HashFunc() != 0 && len(digest) != opts.HashFunc().Size() { + return nil, fmt.Errorf("Digest length of %v bytes does not match Hash function size of %v bytes", len(digest), opts.HashFunc().Size()) + } + err = k.client.Call(signAPI, SignArgs{Digest: digest, Opts: opts}, &signed) + return +} + +// Cred spawns a signer subprocess that listens on stdin/stdout to perform certificate +// related operations, including signing messages with the private key. +// +// The signer binary path is read from the specified configFilePath, if provided. +// Otherwise, use the default config file path. +// +// The config file also specifies which certificate the signer should use. 
+func Cred(configFilePath string) (*Key, error) { + if configFilePath == "" { + configFilePath = util.GetDefaultConfigFilePath() + } + enterpriseCertSignerPath, err := util.LoadSignerBinaryPath(configFilePath) + if err != nil { + return nil, err + } + k := &Key{ + cmd: exec.Command(enterpriseCertSignerPath, configFilePath), + } + + // Redirect errors from subprocess to parent process. + k.cmd.Stderr = os.Stderr + + // RPC client will communicate with subprocess over stdin/stdout. + kin, err := k.cmd.StdinPipe() + if err != nil { + return nil, err + } + kout, err := k.cmd.StdoutPipe() + if err != nil { + return nil, err + } + k.client = rpc.NewClient(&Connection{kout, kin}) + + if err := k.cmd.Start(); err != nil { + return nil, fmt.Errorf("starting enterprise cert signer subprocess: %w", err) + } + + if err := k.client.Call(certificateChainAPI, struct{}{}, &k.chain); err != nil { + return nil, fmt.Errorf("failed to retrieve certificate chain: %w", err) + } + + var publicKeyBytes []byte + if err := k.client.Call(publicKeyAPI, struct{}{}, &publicKeyBytes); err != nil { + return nil, fmt.Errorf("failed to retrieve public key: %w", err) + } + + publicKey, err := x509.ParsePKIXPublicKey(publicKeyBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse public key: %w", err) + } + + var ok bool + k.publicKey, ok = publicKey.(crypto.PublicKey) + if !ok { + return nil, fmt.Errorf("invalid public key type: %T", publicKey) + } + + switch pub := k.publicKey.(type) { + case *rsa.PublicKey: + if pub.Size() < 256 { + return nil, fmt.Errorf("RSA modulus size is less than 2048 bits: %v", pub.Size()*8) + } + case *ecdsa.PublicKey: + default: + return nil, fmt.Errorf("unsupported public key type: %v", pub) + } + + return k, nil +} diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go new file mode 100644 index 00000000000..ccef5278a30 --- /dev/null +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go @@ -0,0 +1,71 @@ +// Package util provides helper functions for the client. +package util + +import ( + "encoding/json" + "errors" + "io/ioutil" + "os" + "os/user" + "path/filepath" + "runtime" +) + +const configFileName = "certificate_config.json" + +// EnterpriseCertificateConfig contains parameters for initializing signer. +type EnterpriseCertificateConfig struct { + Libs Libs `json:"libs"` +} + +// Libs specifies the locations of helper libraries. +type Libs struct { + ECP string `json:"ecp"` +} + +// LoadSignerBinaryPath retrieves the path of the signer binary from the config file. 
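+//
+// For illustration, the config file is expected to look roughly like this,
+// where the path value is a placeholder for the actual signer binary location:
+//
+//	{"libs": {"ecp": "/path/to/ecp"}}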
+func LoadSignerBinaryPath(configFilePath string) (path string, err error) { + jsonFile, err := os.Open(configFilePath) + if err != nil { + return "", err + } + + byteValue, err := ioutil.ReadAll(jsonFile) + if err != nil { + return "", err + } + var config EnterpriseCertificateConfig + err = json.Unmarshal(byteValue, &config) + if err != nil { + return "", err + } + signerBinaryPath := config.Libs.ECP + if signerBinaryPath == "" { + return "", errors.New("signer binary path is missing") + } + return signerBinaryPath, nil +} + +func guessHomeDir() string { + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + if v := os.Getenv("HOME"); v != "" { + return v + } + // Else, fall back to user.Current: + if u, err := user.Current(); err == nil { + return u.HomeDir + } + return "" +} + +func getDefaultConfigFileDirectory() (directory string) { + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud") + } + return filepath.Join(guessHomeDir(), ".config/gcloud") +} + +// GetDefaultConfigFilePath returns the default path of the enterprise certificate config file created by gCloud. +func GetDefaultConfigFilePath() (path string) { + return filepath.Join(getDefaultConfigFileDirectory(), configFileName) +} diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json new file mode 100644 index 00000000000..d88960b7ef1 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -0,0 +1,3 @@ +{ + "v2": "2.7.0" +} diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md new file mode 100644 index 00000000000..b75170f2227 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -0,0 +1,47 @@ +# Changelog + +## [2.7.0](https://github.com/googleapis/gax-go/compare/v2.6.0...v2.7.0) (2022-11-02) + + +### Features + +* update google.golang.org/api to latest ([#240](https://github.com/googleapis/gax-go/issues/240)) ([f690a02](https://github.com/googleapis/gax-go/commit/f690a02c806a2903bdee943ede3a58e3a331ebd6)) +* **v2/apierror:** add apierror.FromWrappingError ([#238](https://github.com/googleapis/gax-go/issues/238)) ([9dbd96d](https://github.com/googleapis/gax-go/commit/9dbd96d59b9d54ceb7c025513aa8c1a9d727382f)) + +## [2.6.0](https://github.com/googleapis/gax-go/compare/v2.5.1...v2.6.0) (2022-10-13) + + +### Features + +* **v2:** copy DetermineContentType functionality ([#230](https://github.com/googleapis/gax-go/issues/230)) ([2c52a70](https://github.com/googleapis/gax-go/commit/2c52a70bae965397f740ed27d46aabe89ff249b3)) + +## [2.5.1](https://github.com/googleapis/gax-go/compare/v2.5.0...v2.5.1) (2022-08-04) + + +### Bug Fixes + +* **v2:** resolve bad genproto pseudoversion in go.mod ([#218](https://github.com/googleapis/gax-go/issues/218)) ([1379b27](https://github.com/googleapis/gax-go/commit/1379b27e9846d959f7e1163b9ef298b3c92c8d23)) + +## [2.5.0](https://github.com/googleapis/gax-go/compare/v2.4.0...v2.5.0) (2022-08-04) + + +### Features + +* add ExtractProtoMessage to apierror ([#213](https://github.com/googleapis/gax-go/issues/213)) ([a6ce70c](https://github.com/googleapis/gax-go/commit/a6ce70c725c890533a9de6272d3b5ba2e336d6bb)) + +## 
[2.4.0](https://github.com/googleapis/gax-go/compare/v2.3.0...v2.4.0) (2022-05-09) + + +### Features + +* **v2:** add OnHTTPCodes CallOption ([#188](https://github.com/googleapis/gax-go/issues/188)) ([ba7c534](https://github.com/googleapis/gax-go/commit/ba7c5348363ab6c33e1cee3c03c0be68a46ca07c)) + + +### Bug Fixes + +* **v2/apierror:** use errors.As in FromError ([#189](https://github.com/googleapis/gax-go/issues/189)) ([f30f05b](https://github.com/googleapis/gax-go/commit/f30f05be583828f4c09cca4091333ea88ff8d79e)) + + +### Miscellaneous Chores + +* **v2:** bump release-please processing ([#192](https://github.com/googleapis/gax-go/issues/192)) ([56172f9](https://github.com/googleapis/gax-go/commit/56172f971d1141d7687edaac053ad3470af76719)) diff --git a/vendor/github.com/googleapis/gax-go/v2/LICENSE b/vendor/github.com/googleapis/gax-go/v2/LICENSE new file mode 100644 index 00000000000..6d16b6578a2 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/LICENSE @@ -0,0 +1,27 @@ +Copyright 2016, Google Inc. +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go new file mode 100644 index 00000000000..aa6be1304f1 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go @@ -0,0 +1,342 @@ +// Copyright 2021, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package apierror implements a wrapper error for parsing error details from
+// API calls. Both HTTP & gRPC status errors are supported.
+package apierror
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	jsonerror "github.com/googleapis/gax-go/v2/apierror/internal/proto"
+	"google.golang.org/api/googleapi"
+	"google.golang.org/genproto/googleapis/rpc/errdetails"
+	"google.golang.org/grpc/status"
+	"google.golang.org/protobuf/encoding/protojson"
+	"google.golang.org/protobuf/proto"
+)
+
+// ErrDetails holds the google/rpc/error_details.proto messages.
+type ErrDetails struct {
+	ErrorInfo           *errdetails.ErrorInfo
+	BadRequest          *errdetails.BadRequest
+	PreconditionFailure *errdetails.PreconditionFailure
+	QuotaFailure        *errdetails.QuotaFailure
+	RetryInfo           *errdetails.RetryInfo
+	ResourceInfo        *errdetails.ResourceInfo
+	RequestInfo         *errdetails.RequestInfo
+	DebugInfo           *errdetails.DebugInfo
+	Help                *errdetails.Help
+	LocalizedMessage    *errdetails.LocalizedMessage
+
+	// Unknown stores unidentifiable error details.
+	Unknown []interface{}
+}
+
+// ErrMessageNotFound is used to signal ExtractProtoMessage found no matching messages.
+var ErrMessageNotFound = errors.New("message not found")
+
+// ExtractProtoMessage provides a mechanism for extracting protobuf messages from the
+// Unknown error details. If ExtractProtoMessage finds an unknown message of the same type,
+// the content of the message is copied to the provided message.
+//
+// ExtractProtoMessage will return ErrMessageNotFound if there are no messages matching the
+// protocol buffer type of the provided message.
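+//
+// An illustrative sketch, where mypb.MyDetail stands in for a hypothetical
+// custom message type carried in the Unknown details:
+//
+//	msg := &mypb.MyDetail{}
+//	if err := details.ExtractProtoMessage(msg); err == nil {
+//		// msg now holds a copy of the matching detail.
+//	}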
+func (e ErrDetails) ExtractProtoMessage(v proto.Message) error { + if v == nil { + return ErrMessageNotFound + } + for _, elem := range e.Unknown { + if elemProto, ok := elem.(proto.Message); ok { + if v.ProtoReflect().Type() == elemProto.ProtoReflect().Type() { + proto.Merge(v, elemProto) + return nil + } + } + } + return ErrMessageNotFound +} + +func (e ErrDetails) String() string { + var d strings.Builder + if e.ErrorInfo != nil { + d.WriteString(fmt.Sprintf("error details: name = ErrorInfo reason = %s domain = %s metadata = %s\n", + e.ErrorInfo.GetReason(), e.ErrorInfo.GetDomain(), e.ErrorInfo.GetMetadata())) + } + + if e.BadRequest != nil { + v := e.BadRequest.GetFieldViolations() + var f []string + var desc []string + for _, x := range v { + f = append(f, x.GetField()) + desc = append(desc, x.GetDescription()) + } + d.WriteString(fmt.Sprintf("error details: name = BadRequest field = %s desc = %s\n", + strings.Join(f, " "), strings.Join(desc, " "))) + } + + if e.PreconditionFailure != nil { + v := e.PreconditionFailure.GetViolations() + var t []string + var s []string + var desc []string + for _, x := range v { + t = append(t, x.GetType()) + s = append(s, x.GetSubject()) + desc = append(desc, x.GetDescription()) + } + d.WriteString(fmt.Sprintf("error details: name = PreconditionFailure type = %s subj = %s desc = %s\n", strings.Join(t, " "), + strings.Join(s, " "), strings.Join(desc, " "))) + } + + if e.QuotaFailure != nil { + v := e.QuotaFailure.GetViolations() + var s []string + var desc []string + for _, x := range v { + s = append(s, x.GetSubject()) + desc = append(desc, x.GetDescription()) + } + d.WriteString(fmt.Sprintf("error details: name = QuotaFailure subj = %s desc = %s\n", + strings.Join(s, " "), strings.Join(desc, " "))) + } + + if e.RequestInfo != nil { + d.WriteString(fmt.Sprintf("error details: name = RequestInfo id = %s data = %s\n", + e.RequestInfo.GetRequestId(), e.RequestInfo.GetServingData())) + } + + if e.ResourceInfo != nil { + d.WriteString(fmt.Sprintf("error details: name = ResourceInfo type = %s resourcename = %s owner = %s desc = %s\n", + e.ResourceInfo.GetResourceType(), e.ResourceInfo.GetResourceName(), + e.ResourceInfo.GetOwner(), e.ResourceInfo.GetDescription())) + + } + if e.RetryInfo != nil { + d.WriteString(fmt.Sprintf("error details: retry in %s\n", e.RetryInfo.GetRetryDelay().AsDuration())) + + } + if e.Unknown != nil { + var s []string + for _, x := range e.Unknown { + s = append(s, fmt.Sprintf("%v", x)) + } + d.WriteString(fmt.Sprintf("error details: name = Unknown desc = %s\n", strings.Join(s, " "))) + } + + if e.DebugInfo != nil { + d.WriteString(fmt.Sprintf("error details: name = DebugInfo detail = %s stack = %s\n", e.DebugInfo.GetDetail(), + strings.Join(e.DebugInfo.GetStackEntries(), " "))) + } + if e.Help != nil { + var desc []string + var url []string + for _, x := range e.Help.Links { + desc = append(desc, x.GetDescription()) + url = append(url, x.GetUrl()) + } + d.WriteString(fmt.Sprintf("error details: name = Help desc = %s url = %s\n", + strings.Join(desc, " "), strings.Join(url, " "))) + } + if e.LocalizedMessage != nil { + d.WriteString(fmt.Sprintf("error details: name = LocalizedMessage locale = %s msg = %s\n", + e.LocalizedMessage.GetLocale(), e.LocalizedMessage.GetMessage())) + } + + return d.String() +} + +// APIError wraps either a gRPC Status error or a HTTP googleapi.Error. It +// implements error and Status interfaces. 
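+//
+// A typical call-site sketch (illustrative):
+//
+//	if apiErr, ok := apierror.FromError(err); ok {
+//		log.Println(apiErr.Reason(), apiErr.Domain())
+//	}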
+type APIError struct { + err error + status *status.Status + httpErr *googleapi.Error + details ErrDetails +} + +// Details presents the error details of the APIError. +func (a *APIError) Details() ErrDetails { + return a.details +} + +// Unwrap extracts the original error. +func (a *APIError) Unwrap() error { + return a.err +} + +// Error returns a readable representation of the APIError. +func (a *APIError) Error() string { + var msg string + if a.status != nil { + msg = a.err.Error() + } else if a.httpErr != nil { + // Truncate the googleapi.Error message because it dumps the Details in + // an ugly way. + msg = fmt.Sprintf("googleapi: Error %d: %s", a.httpErr.Code, a.httpErr.Message) + } + return strings.TrimSpace(fmt.Sprintf("%s\n%s", msg, a.details)) +} + +// GRPCStatus extracts the underlying gRPC Status error. +// This method is necessary to fulfill the interface +// described in https://pkg.go.dev/google.golang.org/grpc/status#FromError. +func (a *APIError) GRPCStatus() *status.Status { + return a.status +} + +// Reason returns the reason in an ErrorInfo. +// If ErrorInfo is nil, it returns an empty string. +func (a *APIError) Reason() string { + return a.details.ErrorInfo.GetReason() +} + +// Domain returns the domain in an ErrorInfo. +// If ErrorInfo is nil, it returns an empty string. +func (a *APIError) Domain() string { + return a.details.ErrorInfo.GetDomain() +} + +// Metadata returns the metadata in an ErrorInfo. +// If ErrorInfo is nil, it returns nil. +func (a *APIError) Metadata() map[string]string { + return a.details.ErrorInfo.GetMetadata() + +} + +// setDetailsFromError parses a Status error or a googleapi.Error +// and sets status and details or httpErr and details, respectively. +// It returns false if neither Status nor googleapi.Error can be parsed. +func (a *APIError) setDetailsFromError(err error) bool { + st, isStatus := status.FromError(err) + var herr *googleapi.Error + isHTTPErr := errors.As(err, &herr) + + switch { + case isStatus: + a.status = st + a.details = parseDetails(st.Details()) + case isHTTPErr: + a.httpErr = herr + a.details = parseHTTPDetails(herr) + default: + return false + } + return true +} + +// FromError parses a Status error or a googleapi.Error and builds an +// APIError, wrapping the provided error in the new APIError. It +// returns false if neither Status nor googleapi.Error can be parsed. +func FromError(err error) (*APIError, bool) { + return ParseError(err, true) +} + +// ParseError parses a Status error or a googleapi.Error and builds an +// APIError. If wrap is true, it wraps the error in the new APIError. +// It returns false if neither Status nor googleapi.Error can be parsed. +func ParseError(err error, wrap bool) (*APIError, bool) { + if err == nil { + return nil, false + } + ae := APIError{} + if wrap { + ae = APIError{err: err} + } + if !ae.setDetailsFromError(err) { + return nil, false + } + return &ae, true +} + +// parseDetails accepts a slice of interface{} that should be backed by some +// sort of proto.Message that can be cast to the google/rpc/error_details.proto +// types. +// +// This is for internal use only. 
+func parseDetails(details []interface{}) ErrDetails {
+	var ed ErrDetails
+	for _, d := range details {
+		switch d := d.(type) {
+		case *errdetails.ErrorInfo:
+			ed.ErrorInfo = d
+		case *errdetails.BadRequest:
+			ed.BadRequest = d
+		case *errdetails.PreconditionFailure:
+			ed.PreconditionFailure = d
+		case *errdetails.QuotaFailure:
+			ed.QuotaFailure = d
+		case *errdetails.RetryInfo:
+			ed.RetryInfo = d
+		case *errdetails.ResourceInfo:
+			ed.ResourceInfo = d
+		case *errdetails.RequestInfo:
+			ed.RequestInfo = d
+		case *errdetails.DebugInfo:
+			ed.DebugInfo = d
+		case *errdetails.Help:
+			ed.Help = d
+		case *errdetails.LocalizedMessage:
+			ed.LocalizedMessage = d
+		default:
+			ed.Unknown = append(ed.Unknown, d)
+		}
+	}
+
+	return ed
+}
+
+// parseHTTPDetails will convert the given googleapi.Error into the protobuf
+// representation then parse the Any values that contain the error details.
+//
+// This is for internal use only.
+func parseHTTPDetails(gae *googleapi.Error) ErrDetails {
+	e := &jsonerror.Error{}
+	if err := protojson.Unmarshal([]byte(gae.Body), e); err != nil {
+		// If the error body does not conform to the error schema, ignore it
+		// altogether. See https://cloud.google.com/apis/design/errors#http_mapping.
+		return ErrDetails{}
+	}
+
+	// Coerce the Any messages into proto.Message then parse the details.
+	details := []interface{}{}
+	for _, any := range e.GetError().GetDetails() {
+		m, err := any.UnmarshalNew()
+		if err != nil {
+			// Ignore malformed Any values.
+			continue
+		}
+		details = append(details, m)
+	}
+
+	return parseDetails(details)
+}
diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md
new file mode 100644
index 00000000000..9ff0caea946
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md
@@ -0,0 +1,30 @@
+# HTTP JSON Error Schema
+
+The `error.proto` represents the HTTP-JSON schema used by Google APIs to convey
+error payloads as described by https://cloud.google.com/apis/design/errors#http_mapping.
+This package is for internal parsing logic only and should not be used in any
+other context.
+
+## Regeneration
+
+To regenerate the protobuf Go code you will need the following:
+
+* A local copy of [googleapis], the absolute path to which should be exported to
+the environment variable `GOOGLEAPIS`
+* The protobuf compiler [protoc]
+* The Go [protobuf plugin]
+* The [goimports] tool
+
+From this directory run the following command:
+```sh
+protoc -I $GOOGLEAPIS -I. --go_out=. --go_opt=module=github.com/googleapis/gax-go/v2/apierror/internal/proto error.proto
+goimports -w .
+```
+
+Note: the `module` plugin option ensures the generated code is placed in this
+directory, and not in several nested directories defined by `go_package` option.
+ +[googleapis]: https://github.com/googleapis/googleapis +[protoc]: https://github.com/protocolbuffers/protobuf#protocol-compiler-installation +[protobuf plugin]: https://developers.google.com/protocol-buffers/docs/reference/go-generated +[goimports]: https://pkg.go.dev/golang.org/x/tools/cmd/goimports \ No newline at end of file diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go new file mode 100644 index 00000000000..e4b03f161d8 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go @@ -0,0 +1,256 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.0 +// protoc v3.17.3 +// source: custom_error.proto + +package jsonerror + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Error code for `CustomError`. +type CustomError_CustomErrorCode int32 + +const ( + // Default error. + CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED CustomError_CustomErrorCode = 0 + // Too many foo. + CustomError_TOO_MANY_FOO CustomError_CustomErrorCode = 1 + // Not enough foo. + CustomError_NOT_ENOUGH_FOO CustomError_CustomErrorCode = 2 + // Catastrophic error. + CustomError_UNIVERSE_WAS_DESTROYED CustomError_CustomErrorCode = 3 +) + +// Enum value maps for CustomError_CustomErrorCode. +var ( + CustomError_CustomErrorCode_name = map[int32]string{ + 0: "CUSTOM_ERROR_CODE_UNSPECIFIED", + 1: "TOO_MANY_FOO", + 2: "NOT_ENOUGH_FOO", + 3: "UNIVERSE_WAS_DESTROYED", + } + CustomError_CustomErrorCode_value = map[string]int32{ + "CUSTOM_ERROR_CODE_UNSPECIFIED": 0, + "TOO_MANY_FOO": 1, + "NOT_ENOUGH_FOO": 2, + "UNIVERSE_WAS_DESTROYED": 3, + } +) + +func (x CustomError_CustomErrorCode) Enum() *CustomError_CustomErrorCode { + p := new(CustomError_CustomErrorCode) + *p = x + return p +} + +func (x CustomError_CustomErrorCode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CustomError_CustomErrorCode) Descriptor() protoreflect.EnumDescriptor { + return file_custom_error_proto_enumTypes[0].Descriptor() +} + +func (CustomError_CustomErrorCode) Type() protoreflect.EnumType { + return &file_custom_error_proto_enumTypes[0] +} + +func (x CustomError_CustomErrorCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CustomError_CustomErrorCode.Descriptor instead. 
+func (CustomError_CustomErrorCode) EnumDescriptor() ([]byte, []int) { + return file_custom_error_proto_rawDescGZIP(), []int{0, 0} +} + +// CustomError is an example of a custom error message which may be included +// in an rpc status. It is not meant to reflect a standard error. +type CustomError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Error code specific to the custom API being invoked. + Code CustomError_CustomErrorCode `protobuf:"varint,1,opt,name=code,proto3,enum=error.CustomError_CustomErrorCode" json:"code,omitempty"` + // Name of the failed entity. + Entity string `protobuf:"bytes,2,opt,name=entity,proto3" json:"entity,omitempty"` + // Message that describes the error. + ErrorMessage string `protobuf:"bytes,3,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *CustomError) Reset() { + *x = CustomError{} + if protoimpl.UnsafeEnabled { + mi := &file_custom_error_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomError) ProtoMessage() {} + +func (x *CustomError) ProtoReflect() protoreflect.Message { + mi := &file_custom_error_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomError.ProtoReflect.Descriptor instead. +func (*CustomError) Descriptor() ([]byte, []int) { + return file_custom_error_proto_rawDescGZIP(), []int{0} +} + +func (x *CustomError) GetCode() CustomError_CustomErrorCode { + if x != nil { + return x.Code + } + return CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED +} + +func (x *CustomError) GetEntity() string { + if x != nil { + return x.Entity + } + return "" +} + +func (x *CustomError) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_custom_error_proto protoreflect.FileDescriptor + +var file_custom_error_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xfa, 0x01, 0x0a, 0x0b, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x22, 0x76, 0x0a, 0x0f, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, + 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x54, 
0x4f, 0x4f, 0x5f, 0x4d, 0x41, + 0x4e, 0x59, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4e, 0x4f, 0x54, 0x5f, + 0x45, 0x4e, 0x4f, 0x55, 0x47, 0x48, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, + 0x55, 0x4e, 0x49, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x57, 0x41, 0x53, 0x5f, 0x44, 0x45, 0x53, + 0x54, 0x52, 0x4f, 0x59, 0x45, 0x44, 0x10, 0x03, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2f, 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_custom_error_proto_rawDescOnce sync.Once + file_custom_error_proto_rawDescData = file_custom_error_proto_rawDesc +) + +func file_custom_error_proto_rawDescGZIP() []byte { + file_custom_error_proto_rawDescOnce.Do(func() { + file_custom_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_custom_error_proto_rawDescData) + }) + return file_custom_error_proto_rawDescData +} + +var file_custom_error_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_custom_error_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_custom_error_proto_goTypes = []interface{}{ + (CustomError_CustomErrorCode)(0), // 0: error.CustomError.CustomErrorCode + (*CustomError)(nil), // 1: error.CustomError +} +var file_custom_error_proto_depIdxs = []int32{ + 0, // 0: error.CustomError.code:type_name -> error.CustomError.CustomErrorCode + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_custom_error_proto_init() } +func file_custom_error_proto_init() { + if File_custom_error_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_custom_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_custom_error_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_custom_error_proto_goTypes, + DependencyIndexes: file_custom_error_proto_depIdxs, + EnumInfos: file_custom_error_proto_enumTypes, + MessageInfos: file_custom_error_proto_msgTypes, + }.Build() + File_custom_error_proto = out.File + file_custom_error_proto_rawDesc = nil + file_custom_error_proto_goTypes = nil + file_custom_error_proto_depIdxs = nil +} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto new file mode 100644 index 00000000000..21678ae65c9 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto @@ -0,0 +1,50 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the 
License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package error; + +option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror"; + + +// CustomError is an example of a custom error message which may be included +// in an rpc status. It is not meant to reflect a standard error. +message CustomError { + + // Error code for `CustomError`. + enum CustomErrorCode { + // Default error. + CUSTOM_ERROR_CODE_UNSPECIFIED = 0; + + // Too many foo. + TOO_MANY_FOO = 1; + + // Not enough foo. + NOT_ENOUGH_FOO = 2; + + // Catastrophic error. + UNIVERSE_WAS_DESTROYED = 3; + + } + + // Error code specific to the custom API being invoked. + CustomErrorCode code = 1; + + // Name of the failed entity. + string entity = 2; + + // Message that describes the error. + string error_message = 3; +} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go new file mode 100644 index 00000000000..7dd9b83739a --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go @@ -0,0 +1,280 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.0 +// protoc v3.15.8 +// source: apierror/internal/proto/error.proto + +package jsonerror + +import ( + reflect "reflect" + sync "sync" + + code "google.golang.org/genproto/googleapis/rpc/code" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The error format v2 for Google JSON REST APIs. +// Copied from https://cloud.google.com/apis/design/errors#http_mapping. +// +// NOTE: This schema is not used for other wire protocols. +type Error struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The actual error payload. The nested message structure is for backward + // compatibility with Google API client libraries. It also makes the error + // more readable to developers. 
+ Error *Error_Status `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *Error) Reset() { + *x = Error{} + if protoimpl.UnsafeEnabled { + mi := &file_apierror_internal_proto_error_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Error) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Error) ProtoMessage() {} + +func (x *Error) ProtoReflect() protoreflect.Message { + mi := &file_apierror_internal_proto_error_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Error.ProtoReflect.Descriptor instead. +func (*Error) Descriptor() ([]byte, []int) { + return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0} +} + +func (x *Error) GetError() *Error_Status { + if x != nil { + return x.Error + } + return nil +} + +// This message has the same semantics as `google.rpc.Status`. It uses HTTP +// status code instead of gRPC status code. It has an extra field `status` +// for backward compatibility with Google API Client Libraries. +type Error_Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The HTTP status code that corresponds to `google.rpc.Status.code`. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // This corresponds to `google.rpc.Status.message`. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // This is the enum version for `google.rpc.Status.code`. + Status code.Code `protobuf:"varint,4,opt,name=status,proto3,enum=google.rpc.Code" json:"status,omitempty"` + // This corresponds to `google.rpc.Status.details`. + Details []*anypb.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"` +} + +func (x *Error_Status) Reset() { + *x = Error_Status{} + if protoimpl.UnsafeEnabled { + mi := &file_apierror_internal_proto_error_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Error_Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Error_Status) ProtoMessage() {} + +func (x *Error_Status) ProtoReflect() protoreflect.Message { + mi := &file_apierror_internal_proto_error_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Error_Status.ProtoReflect.Descriptor instead. 
+func (*Error_Status) Descriptor() ([]byte, []int) { + return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Error_Status) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *Error_Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Error_Status) GetStatus() code.Code { + if x != nil { + return x.Status + } + return code.Code(0) +} + +func (x *Error_Status) GetDetails() []*anypb.Any { + if x != nil { + return x.Details + } + return nil +} + +var File_apierror_internal_proto_error_proto protoreflect.FileDescriptor + +var file_apierror_internal_proto_error_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x19, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5, + 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x1a, 0x90, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, + 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_apierror_internal_proto_error_proto_rawDescOnce sync.Once + file_apierror_internal_proto_error_proto_rawDescData = file_apierror_internal_proto_error_proto_rawDesc +) + +func file_apierror_internal_proto_error_proto_rawDescGZIP() []byte { + file_apierror_internal_proto_error_proto_rawDescOnce.Do(func() { + file_apierror_internal_proto_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_apierror_internal_proto_error_proto_rawDescData) + }) + return file_apierror_internal_proto_error_proto_rawDescData +} + +var file_apierror_internal_proto_error_proto_msgTypes 
= make([]protoimpl.MessageInfo, 2) +var file_apierror_internal_proto_error_proto_goTypes = []interface{}{ + (*Error)(nil), // 0: error.Error + (*Error_Status)(nil), // 1: error.Error.Status + (code.Code)(0), // 2: google.rpc.Code + (*anypb.Any)(nil), // 3: google.protobuf.Any +} +var file_apierror_internal_proto_error_proto_depIdxs = []int32{ + 1, // 0: error.Error.error:type_name -> error.Error.Status + 2, // 1: error.Error.Status.status:type_name -> google.rpc.Code + 3, // 2: error.Error.Status.details:type_name -> google.protobuf.Any + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_apierror_internal_proto_error_proto_init() } +func file_apierror_internal_proto_error_proto_init() { + if File_apierror_internal_proto_error_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_apierror_internal_proto_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Error); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apierror_internal_proto_error_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Error_Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_apierror_internal_proto_error_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_apierror_internal_proto_error_proto_goTypes, + DependencyIndexes: file_apierror_internal_proto_error_proto_depIdxs, + MessageInfos: file_apierror_internal_proto_error_proto_msgTypes, + }.Build() + File_apierror_internal_proto_error_proto = out.File + file_apierror_internal_proto_error_proto_rawDesc = nil + file_apierror_internal_proto_error_proto_goTypes = nil + file_apierror_internal_proto_error_proto_depIdxs = nil +} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto new file mode 100644 index 00000000000..4b9b13ce111 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto @@ -0,0 +1,46 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package error; + +import "google/protobuf/any.proto"; +import "google/rpc/code.proto"; + +option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror"; + +// The error format v2 for Google JSON REST APIs. 
+// Copied from https://cloud.google.com/apis/design/errors#http_mapping. +// +// NOTE: This schema is not used for other wire protocols. +message Error { + // This message has the same semantics as `google.rpc.Status`. It uses HTTP + // status code instead of gRPC status code. It has an extra field `status` + // for backward compatibility with Google API Client Libraries. + message Status { + // The HTTP status code that corresponds to `google.rpc.Status.code`. + int32 code = 1; + // This corresponds to `google.rpc.Status.message`. + string message = 2; + // This is the enum version for `google.rpc.Status.code`. + google.rpc.Code status = 4; + // This corresponds to `google.rpc.Status.details`. + repeated google.protobuf.Any details = 5; + } + // The actual error payload. The nested message structure is for backward + // compatibility with Google API client libraries. It also makes the error + // more readable to developers. + Status error = 1; +} diff --git a/vendor/github.com/googleapis/gax-go/v2/call_option.go b/vendor/github.com/googleapis/gax-go/v2/call_option.go new file mode 100644 index 00000000000..e092005563b --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/call_option.go @@ -0,0 +1,244 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "errors" + "math/rand" + "time" + + "google.golang.org/api/googleapi" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// CallOption is an option used by Invoke to control behaviors of RPC calls. +// CallOption works by modifying relevant fields of CallSettings. +type CallOption interface { + // Resolve applies the option by modifying cs. + Resolve(cs *CallSettings) +} + +// Retryer is used by Invoke to determine retry behavior. +type Retryer interface { + // Retry reports whether a request should be retried and how long to pause before retrying + // if the previous attempt returned with err. Invoke never calls Retry with nil error. 
+ Retry(err error) (pause time.Duration, shouldRetry bool) +} + +type retryerOption func() Retryer + +func (o retryerOption) Resolve(s *CallSettings) { + s.Retry = o +} + +// WithRetry sets CallSettings.Retry to fn. +func WithRetry(fn func() Retryer) CallOption { + return retryerOption(fn) +} + +// OnErrorFunc returns a Retryer that retries if and only if the previous attempt +// returns an error that satisfies shouldRetry. +// +// Pause times between retries are specified by bo. bo is only used for its +// parameters; each Retryer has its own copy. +func OnErrorFunc(bo Backoff, shouldRetry func(err error) bool) Retryer { + return &errorRetryer{ + shouldRetry: shouldRetry, + backoff: bo, + } +} + +type errorRetryer struct { + backoff Backoff + shouldRetry func(err error) bool +} + +func (r *errorRetryer) Retry(err error) (time.Duration, bool) { + if r.shouldRetry(err) { + return r.backoff.Pause(), true + } + + return 0, false +} + +// OnCodes returns a Retryer that retries if and only if +// the previous attempt returns a GRPC error whose error code is stored in cc. +// Pause times between retries are specified by bo. +// +// bo is only used for its parameters; each Retryer has its own copy. +func OnCodes(cc []codes.Code, bo Backoff) Retryer { + return &boRetryer{ + backoff: bo, + codes: append([]codes.Code(nil), cc...), + } +} + +type boRetryer struct { + backoff Backoff + codes []codes.Code +} + +func (r *boRetryer) Retry(err error) (time.Duration, bool) { + st, ok := status.FromError(err) + if !ok { + return 0, false + } + c := st.Code() + for _, rc := range r.codes { + if c == rc { + return r.backoff.Pause(), true + } + } + return 0, false +} + +// OnHTTPCodes returns a Retryer that retries if and only if +// the previous attempt returns a googleapi.Error whose status code is stored in +// cc. Pause times between retries are specified by bo. +// +// bo is only used for its parameters; each Retryer has its own copy. +func OnHTTPCodes(bo Backoff, cc ...int) Retryer { + codes := make(map[int]bool, len(cc)) + for _, c := range cc { + codes[c] = true + } + + return &httpRetryer{ + backoff: bo, + codes: codes, + } +} + +type httpRetryer struct { + backoff Backoff + codes map[int]bool +} + +func (r *httpRetryer) Retry(err error) (time.Duration, bool) { + var gerr *googleapi.Error + if !errors.As(err, &gerr) { + return 0, false + } + + if r.codes[gerr.Code] { + return r.backoff.Pause(), true + } + + return 0, false +} + +// Backoff implements exponential backoff. The wait time between retries is a +// random value between 0 and the "retry period" - the time between retries. The +// retry period starts at Initial and increases by the factor of Multiplier +// every retry, but is capped at Max. +// +// Note: MaxNumRetries / RPCDeadline is specifically not provided. These should +// be built on top of Backoff. +type Backoff struct { + // Initial is the initial value of the retry period, defaults to 1 second. + Initial time.Duration + + // Max is the maximum value of the retry period, defaults to 30 seconds. + Max time.Duration + + // Multiplier is the factor by which the retry period increases. + // It should be greater than 1 and defaults to 2. + Multiplier float64 + + // cur is the current retry period. + cur time.Duration +} + +// Pause returns the next time.Duration that the caller should use to backoff. 
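+// +// As a worked example with the defaults below (Initial = 1s, Max = 30s, +// Multiplier = 2), successive retry periods grow as 1s, 2s, 4s, 8s, 16s, +// 30s, 30s, ..., and each call returns a random duration in (0, period].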
+func (bo *Backoff) Pause() time.Duration { + if bo.Initial == 0 { + bo.Initial = time.Second + } + if bo.cur == 0 { + bo.cur = bo.Initial + } + if bo.Max == 0 { + bo.Max = 30 * time.Second + } + if bo.Multiplier < 1 { + bo.Multiplier = 2 + } + // Select a duration between 1ns and the current max. It might seem + // counterintuitive to have so much jitter, but + // https://www.awsarchitectureblog.com/2015/03/backoff.html argues that + // that is the best strategy. + d := time.Duration(1 + rand.Int63n(int64(bo.cur))) + bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier) + if bo.cur > bo.Max { + bo.cur = bo.Max + } + return d +} + +type grpcOpt []grpc.CallOption + +func (o grpcOpt) Resolve(s *CallSettings) { + s.GRPC = o +} + +type pathOpt struct { + p string +} + +func (p pathOpt) Resolve(s *CallSettings) { + s.Path = p.p +} + +// WithPath applies a Path override to the HTTP-based APICall. +// +// This is for internal use only. +func WithPath(p string) CallOption { + return &pathOpt{p: p} +} + +// WithGRPCOptions allows passing gRPC call options during client creation. +func WithGRPCOptions(opt ...grpc.CallOption) CallOption { + return grpcOpt(append([]grpc.CallOption(nil), opt...)) +} + +// CallSettings allow fine-grained control over how calls are made. +type CallSettings struct { + // Retry returns a Retryer to be used to control retry logic of a method call. + // If Retry is nil or the returned Retryer is nil, the call will not be retried. + Retry func() Retryer + + // CallOptions to be forwarded to GRPC. + GRPC []grpc.CallOption + + // Path is an HTTP override for an APICall. + Path string +} diff --git a/vendor/github.com/googleapis/gax-go/v2/content_type.go b/vendor/github.com/googleapis/gax-go/v2/content_type.go new file mode 100644 index 00000000000..1b53d0a3ac1 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/content_type.go @@ -0,0 +1,112 @@ +// Copyright 2022, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
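Taken together, `CallOption`, `Retryer`, `Backoff`, and `CallSettings` are the pieces a generated client threads through `Invoke` (vendored below in invoke.go). The following is a minimal usage sketch, not part of the vendored patch: the RPC stub `callRPC`, the retried code, and the backoff parameters are illustrative assumptions, and the canonical upstream import path is `github.com/googleapis/gax-go/v2`.

```go
package main

import (
	"context"
	"time"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/grpc/codes"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// Retry only on Unavailable, with jittered exponential backoff.
	withRetry := gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    250 * time.Millisecond,
			Max:        10 * time.Second,
			Multiplier: 2,
		})
	})

	// callRPC is a hypothetical stand-in for a real gRPC call stub.
	callRPC := func(ctx context.Context, cs gax.CallSettings) error {
		return nil // a real stub would issue the request using cs.GRPC options
	}

	if err := gax.Invoke(ctx, callRPC, withRetry); err != nil {
		// err is the terminal (non-retried) error, if any.
	}
}
```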
+ +package gax + +import ( + "io" + "io/ioutil" + "net/http" +) + +const sniffBuffSize = 512 + +func newContentSniffer(r io.Reader) *contentSniffer { + return &contentSniffer{r: r} +} + +// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. +type contentSniffer struct { + r io.Reader + start []byte // buffer for the sniffed bytes. + err error // set to any error encountered while reading bytes to be sniffed. + + ctype string // set on first sniff. + sniffed bool // set to true on first sniff. +} + +func (cs *contentSniffer) Read(p []byte) (n int, err error) { + // Ensure that the content type is sniffed before any data is consumed from Reader. + _, _ = cs.ContentType() + + if len(cs.start) > 0 { + n := copy(p, cs.start) + cs.start = cs.start[n:] + return n, nil + } + + // We may have read some bytes into start while sniffing, even if the read ended in an error. + // We should first return those bytes, then the error. + if cs.err != nil { + return 0, cs.err + } + + // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. + return cs.r.Read(p) +} + +// ContentType returns the sniffed content type, and whether the content type was successfully sniffed. +func (cs *contentSniffer) ContentType() (string, bool) { + if cs.sniffed { + return cs.ctype, cs.ctype != "" + } + cs.sniffed = true + // If ReadAll hits EOF, it returns err==nil. + cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) + + // Don't try to detect the content type based on possibly incomplete data. + if cs.err != nil { + return "", false + } + + cs.ctype = http.DetectContentType(cs.start) + return cs.ctype, true +} + +// DetermineContentType determines the content type of the supplied reader. +// The content of media will be sniffed to determine the content type. +// After calling DetermineContentType the caller must not perform further reads on +// media, but rather read from the Reader that is returned. +func DetermineContentType(media io.Reader) (io.Reader, string) { + // For backwards compatibility, allow clients to set content + // type by providing a ContentTyper for media. + // Note: This is an anonymous interface definition copied from googleapi.ContentTyper. + if typer, ok := media.(interface { + ContentType() string + }); ok { + return media, typer.ContentType() + } + + sniffer := newContentSniffer(media) + if ctype, ok := sniffer.ContentType(); ok { + return sniffer, ctype + } + // If content type could not be sniffed, reads from sniffer will eventually fail with an error. + return sniffer, "" +} diff --git a/vendor/github.com/googleapis/gax-go/v2/gax.go b/vendor/github.com/googleapis/gax-go/v2/gax.go new file mode 100644 index 00000000000..36cdfa33e35 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/gax.go @@ -0,0 +1,41 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc.
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package gax contains a set of modules which aid the development of APIs +// for clients and servers based on gRPC and Google API conventions. +// +// Application code will rarely need to use this library directly. +// However, code generated automatically from API definition files can use it +// to simplify code generation and to provide more convenient and idiomatic API surfaces. +package gax + +import "github.com/googleapis/gax-go/v2/internal" + +// Version specifies the gax-go version being used. +const Version = internal.Version diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go new file mode 100644 index 00000000000..139371a0bf1 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/header.go @@ -0,0 +1,53 @@ +// Copyright 2018, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import "bytes" + +// XGoogHeader is for use by the Google Cloud Libraries only. +// +// XGoogHeader formats key-value pairs. 
+// The resulting string is suitable for x-goog-api-client header. +func XGoogHeader(keyval ...string) string { + if len(keyval) == 0 { + return "" + } + if len(keyval)%2 != 0 { + panic("gax.Header: odd argument count") + } + var buf bytes.Buffer + for i := 0; i < len(keyval); i += 2 { + buf.WriteByte(' ') + buf.WriteString(keyval[i]) + buf.WriteByte('/') + buf.WriteString(keyval[i+1]) + } + return buf.String()[1:] +} diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go new file mode 100644 index 00000000000..0ba5da1dd1e --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -0,0 +1,33 @@ +// Copyright 2022, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package internal + +// Version is the current tagged release of the library. +const Version = "2.7.0" diff --git a/vendor/github.com/googleapis/gax-go/v2/invoke.go b/vendor/github.com/googleapis/gax-go/v2/invoke.go new file mode 100644 index 00000000000..9fcc29959b9 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/invoke.go @@ -0,0 +1,104 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "context" + "strings" + "time" + + "github.com/googleapis/gax-go/v2/apierror" +) + +// APICall is a user defined call stub. +type APICall func(context.Context, CallSettings) error + +// Invoke calls the given APICall, performing retries as specified by opts, if +// any. +func Invoke(ctx context.Context, call APICall, opts ...CallOption) error { + var settings CallSettings + for _, opt := range opts { + opt.Resolve(&settings) + } + return invoke(ctx, call, settings, Sleep) +} + +// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing. +// If interrupted, Sleep returns ctx.Err(). +func Sleep(ctx context.Context, d time.Duration) error { + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return ctx.Err() + case <-t.C: + return nil + } +} + +type sleeper func(ctx context.Context, d time.Duration) error + +// invoke implements Invoke, taking an additional sleeper argument for testing. +func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error { + var retryer Retryer + for { + err := call(ctx, settings) + if err == nil { + return nil + } + // Never retry permanent certificate errors. (e.g. if ca-certificates + // are not installed). We should only make very few, targeted + // exceptions: many (other) status=Unavailable should be retried, such + // as if there's a network hiccup, or the internet goes out for a + // minute. This is also why here we are doing string parsing instead of + // simply making Unavailable a non-retried code elsewhere. + if strings.Contains(err.Error(), "x509: certificate signed by unknown authority") { + return err + } + if apierr, ok := apierror.FromError(err); ok { + err = apierr + } + if settings.Retry == nil { + return err + } + if retryer == nil { + if r := settings.Retry(); r != nil { + retryer = r + } else { + return err + } + } + if d, ok := retryer.Retry(err); !ok { + return err + } else if err = sp(ctx, d); err != nil { + return err + } + } +} diff --git a/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go b/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go new file mode 100644 index 00000000000..cc4486eb9e5 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go @@ -0,0 +1,126 @@ +// Copyright 2022, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "encoding/json" + "errors" + "io" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" +) + +var ( + arrayOpen = json.Delim('[') + arrayClose = json.Delim(']') + errBadOpening = errors.New("unexpected opening token, expected '['") +) + +// ProtoJSONStream represents a wrapper for consuming a stream of protobuf +// messages encoded using protobuf-JSON format. More information on this format +// can be found at https://developers.google.com/protocol-buffers/docs/proto3#json. +// The stream must appear as a comma-delimited, JSON array of objects with +// opening and closing square brackets. +// +// This is for internal use only. +type ProtoJSONStream struct { + first, closed bool + reader io.ReadCloser + stream *json.Decoder + typ protoreflect.MessageType +} + +// NewProtoJSONStreamReader accepts a stream of bytes via an io.ReadCloser that are +// protobuf-JSON encoded protobuf messages of the given type. The ProtoJSONStream +// must be closed when done. +// +// This is for internal use only. +func NewProtoJSONStreamReader(rc io.ReadCloser, typ protoreflect.MessageType) *ProtoJSONStream { + return &ProtoJSONStream{ + first: true, + reader: rc, + stream: json.NewDecoder(rc), + typ: typ, + } +} + +// Recv decodes the next protobuf message in the stream or returns io.EOF if +// the stream is done. It is not safe to call Recv on the same stream from +// different goroutines, just like it is not safe to do so with a single gRPC +// stream. Type-cast the protobuf message returned to the type provided at +// ProtoJSONStream creation. +// Calls to Recv after calling Close will produce io.EOF. +func (s *ProtoJSONStream) Recv() (proto.Message, error) { + if s.closed { + return nil, io.EOF + } + if s.first { + s.first = false + + // Consume the opening '[' so Decode gets one object at a time. + if t, err := s.stream.Token(); err != nil { + return nil, err + } else if t != arrayOpen { + return nil, errBadOpening + } + } + + // Capture the next block of data for the item (a JSON object) in the stream.
+ var raw json.RawMessage + if err := s.stream.Decode(&raw); err != nil { + e := err + // To avoid checking the first token of each stream, just attempt to + // Decode the next blob and if that fails, double check if it is just + // the closing token ']'. If it is the closing, return io.EOF. If it + // isn't, return the original error. + if t, _ := s.stream.Token(); t == arrayClose { + e = io.EOF + } + return nil, e + } + + // Initialize a new instance of the protobuf message to unmarshal the + // raw data into. + m := s.typ.New().Interface() + err := protojson.Unmarshal(raw, m) + + return m, err +} + +// Close closes the stream so that resources are cleaned up. +func (s *ProtoJSONStream) Close() error { + // Dereference the *json.Decoder so that the memory is gc'd. + s.stream = nil + s.closed = true + + return s.reader.Close() +} diff --git a/vendor/github.com/googleapis/gax-go/v2/release-please-config.json b/vendor/github.com/googleapis/gax-go/v2/release-please-config.json new file mode 100644 index 00000000000..61ee266a159 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/release-please-config.json @@ -0,0 +1,10 @@ +{ + "release-type": "go-yoshi", + "separate-pull-requests": true, + "include-component-in-tag": false, + "packages": { + "v2": { + "component": "v2" + } + } +} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE new file mode 100644 index 00000000000..e87a115e462 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
"Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md new file mode 100644 index 00000000000..036e5313fc8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/README.md @@ -0,0 +1,30 @@ +# cleanhttp + +Functions for accessing "clean" Go http.Client values + +------------- + +The Go standard library contains a default `http.Client` called +`http.DefaultClient`. It is a common idiom in Go code to start with +`http.DefaultClient` and tweak it as necessary, and in fact, this is +encouraged; from the `http` package documentation: + +> The Client's Transport typically has internal state (cached TCP connections), +so Clients should be reused instead of created as needed. Clients are safe for +concurrent use by multiple goroutines. + +Unfortunately, this is a shared value, and it is not uncommon for libraries to +assume that they are free to modify it at will. With enough dependencies, it +can be very easy to encounter strange problems and race conditions due to +manipulation of this shared value across libraries and goroutines (clients are +safe for concurrent use, but writing values to the client struct itself is not +protected). + +Making things worse is the fact that a bare `http.Client` will use a default +`http.Transport` called `http.DefaultTransport`, which is another global value +that behaves the same way. So it is not simply enough to replace +`http.DefaultClient` with `&http.Client{}`. + +This repository provides some simple functions to get a "clean" `http.Client` +-- one that uses the same default values as the Go standard library, but +returns a client that does not share any state with other clients. diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go new file mode 100644 index 00000000000..fe28d15b6f9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go @@ -0,0 +1,58 @@ +package cleanhttp + +import ( + "net" + "net/http" + "runtime" + "time" +) + +// DefaultTransport returns a new http.Transport with similar default values to +// http.DefaultTransport, but with idle connections and keepalives disabled. +func DefaultTransport() *http.Transport { + transport := DefaultPooledTransport() + transport.DisableKeepAlives = true + transport.MaxIdleConnsPerHost = -1 + return transport +} + +// DefaultPooledTransport returns a new http.Transport with similar default +// values to http.DefaultTransport. Do not use this for transient transports as +// it can leak file descriptors over time. Only use this for transports that +// will be re-used for the same host(s). 
+func DefaultPooledTransport() *http.Transport {
+	transport := &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		DialContext: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+			DualStack: true,
+		}).DialContext,
+		MaxIdleConns:          100,
+		IdleConnTimeout:       90 * time.Second,
+		TLSHandshakeTimeout:   10 * time.Second,
+		ExpectContinueTimeout: 1 * time.Second,
+		ForceAttemptHTTP2:     true,
+		MaxIdleConnsPerHost:   runtime.GOMAXPROCS(0) + 1,
+	}
+	return transport
+}
+
+// DefaultClient returns a new http.Client with similar default values to
+// http.Client, but with a non-shared Transport, idle connections disabled, and
+// keepalives disabled.
+func DefaultClient() *http.Client {
+	return &http.Client{
+		Transport: DefaultTransport(),
+	}
+}
+
+// DefaultPooledClient returns a new http.Client with similar default values to
+// http.Client, but with a shared Transport. Do not use this function for
+// transient clients as it can leak file descriptors over time. Only use this
+// for clients that will be re-used for the same host(s).
+func DefaultPooledClient() *http.Client {
+	return &http.Client{
+		Transport: DefaultPooledTransport(),
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go
new file mode 100644
index 00000000000..05841092a7b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/doc.go
@@ -0,0 +1,20 @@
+// Package cleanhttp offers convenience utilities for acquiring "clean"
+// http.Transport and http.Client structs.
+//
+// Values set on http.DefaultClient and http.DefaultTransport affect all
+// callers. This can have detrimental effects, especially in TLS contexts,
+// where client or root certificates set to talk to multiple endpoints can end
+// up displacing each other, leading to hard-to-debug issues. This package
+// provides non-shared http.Client and http.Transport structs to ensure that
+// the configuration will not be overwritten by other parts of the application
+// or dependencies.
+//
+// The DefaultClient and DefaultTransport functions disable idle connections
+// and keepalives. Without ensuring that idle connections are closed before
+// garbage collection, short-term clients/transports can leak file descriptors,
+// eventually leading to "too many open files" errors. If you will be
+// connecting to the same hosts repeatedly from the same client, you can use
+// DefaultPooledClient to receive a client that has connection pooling
+// semantics similar to http.DefaultClient.
+//
+package cleanhttp
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go
new file mode 100644
index 00000000000..3c845dc0dc6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go
@@ -0,0 +1,48 @@
+package cleanhttp
+
+import (
+	"net/http"
+	"strings"
+	"unicode"
+)
+
+// HandlerInput provides input options to cleanhttp's handlers
+type HandlerInput struct {
+	ErrStatus int
+}
+
+// PrintablePathCheckHandler is a middleware that ensures the request path
+// contains only printable runes.
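+//
+// Illustrative wiring (the mux and handler names below are examples, not part
+// of this package):
+//
+//	mux := http.NewServeMux()
+//	mux.Handle("/", someHandler)
+//	// A nil *HandlerInput falls back to http.StatusBadRequest for bad paths.
+//	http.ListenAndServe(":8080", cleanhttp.PrintablePathCheckHandler(mux, nil))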
+func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler {
+	// Nil-check on input to make it optional
+	if input == nil {
+		input = &HandlerInput{
+			ErrStatus: http.StatusBadRequest,
+		}
+	}
+
+	// Default to http.StatusBadRequest on error
+	if input.ErrStatus == 0 {
+		input.ErrStatus = http.StatusBadRequest
+	}
+
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r != nil {
+			// Check URL path for non-printable characters
+			idx := strings.IndexFunc(r.URL.Path, func(c rune) bool {
+				return !unicode.IsPrint(c)
+			})
+
+			if idx != -1 {
+				w.WriteHeader(input.ErrStatus)
+				return
+			}
+
+			if next != nil {
+				next.ServeHTTP(w, r)
+			}
+		}
+
+		return
+	})
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/.gitignore b/vendor/github.com/hashicorp/go-hclog/.gitignore
new file mode 100644
index 00000000000..42cc4105ff4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/.gitignore
@@ -0,0 +1 @@
+.idea*
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/go-hclog/LICENSE b/vendor/github.com/hashicorp/go-hclog/LICENSE
new file mode 100644
index 00000000000..abaf1e45f2a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017 HashiCorp
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md
new file mode 100644
index 00000000000..21a17c5af39
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/README.md
@@ -0,0 +1,148 @@
+# go-hclog
+
+[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
+
+[godocs]: https://godoc.org/github.com/hashicorp/go-hclog
+
+`go-hclog` is a package for Go that provides a simple key/value logging
+interface for use in development and production environments.
+
+It provides logging levels that let you limit output to the desired amount
+of detail, unlike the standard library `log` package.
+
+It provides `Printf` style logging of values via `hclog.Fmt()`.
+
+It provides a human readable output mode for use in development as well as
+JSON output mode for production.
+
+## Stability Note
+
+This library has reached 1.0 stability. Its API can be considered solidified
+and promised through future versions.
+
+## Installation and Docs
+
+Install using `go get github.com/hashicorp/go-hclog`.
+ +Full documentation is available at +http://godoc.org/github.com/hashicorp/go-hclog + +## Usage + +### Use the global logger + +```go +hclog.Default().Info("hello world") +``` + +```text +2017-07-05T16:15:55.167-0700 [INFO ] hello world +``` + +(Note timestamps are removed in future examples for brevity.) + +### Create a new logger + +```go +appLogger := hclog.New(&hclog.LoggerOptions{ + Name: "my-app", + Level: hclog.LevelFromString("DEBUG"), +}) +``` + +### Emit an Info level message with 2 key/value pairs + +```go +input := "5.5" +_, err := strconv.ParseInt(input, 10, 32) +if err != nil { + appLogger.Info("Invalid input for ParseInt", "input", input, "error", err) +} +``` + +```text +... [INFO ] my-app: Invalid input for ParseInt: input=5.5 error="strconv.ParseInt: parsing "5.5": invalid syntax" +``` + +### Create a new Logger for a major subsystem + +```go +subsystemLogger := appLogger.Named("transport") +subsystemLogger.Info("we are transporting something") +``` + +```text +... [INFO ] my-app.transport: we are transporting something +``` + +Notice that logs emitted by `subsystemLogger` contain `my-app.transport`, +reflecting both the application and subsystem names. + +### Create a new Logger with fixed key/value pairs + +Using `With()` will include a specific key-value pair in all messages emitted +by that logger. + +```go +requestID := "5fb446b6-6eba-821d-df1b-cd7501b6a363" +requestLogger := subsystemLogger.With("request", requestID) +requestLogger.Info("we are transporting a request") +``` + +```text +... [INFO ] my-app.transport: we are transporting a request: request=5fb446b6-6eba-821d-df1b-cd7501b6a363 +``` + +This allows sub Loggers to be context specific without having to thread that +into all the callers. + +### Using `hclog.Fmt()` + +```go +totalBandwidth := 200 +appLogger.Info("total bandwidth exceeded", "bandwidth", hclog.Fmt("%d GB/s", totalBandwidth)) +``` + +```text +... [INFO ] my-app: total bandwidth exceeded: bandwidth="200 GB/s" +``` + +### Use this with code that uses the standard library logger + +If you want to use the standard library's `log.Logger` interface you can wrap +`hclog.Logger` by calling the `StandardLogger()` method. This allows you to use +it with the familiar `Println()`, `Printf()`, etc. For example: + +```go +stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{ + InferLevels: true, +}) +// Printf() is provided by stdlib log.Logger interface, not hclog.Logger +stdLogger.Printf("[DEBUG] %+v", stdLogger) +``` + +```text +... [DEBUG] my-app: &{mu:{state:0 sema:0} prefix: flag:0 out:0xc42000a0a0 buf:[]} +``` + +Alternatively, you may configure the system-wide logger: + +```go +// log the standard logger from 'import "log"' +log.SetOutput(appLogger.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true})) +log.SetPrefix("") +log.SetFlags(0) + +log.Printf("[DEBUG] %d", 42) +``` + +```text +... [DEBUG] my-app: 42 +``` + +Notice that if `appLogger` is initialized with the `INFO` log level _and_ you +specify `InferLevels: true`, you will not see any output here. You must change +`appLogger` to `DEBUG` to see output. See the docs for more information. + +If the log lines start with a timestamp you can use the +`InferLevelsWithTimestamp` option to try and ignore them. 
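+
+As an illustrative sketch (this example assumes `InferLevelsWithTimestamp` is
+combined with `InferLevels`, and the timestamp layout shown is arbitrary):
+
+```go
+stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{
+    InferLevels:              true,
+    InferLevelsWithTimestamp: true,
+})
+// The leading timestamp is skipped before the [DEBUG] prefix is inferred.
+stdLogger.Println("2023-01-02T15:04:05.000 [DEBUG] retrying request")
+```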
diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_unix.go b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go
new file mode 100644
index 00000000000..99cc176a416
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go
@@ -0,0 +1,29 @@
+//go:build !windows
+// +build !windows
+
+package hclog
+
+import (
+	"github.com/mattn/go-isatty"
+)
+
+// setColorization will mutate the values of this logger
+// to appropriately configure colorization options. On non-Windows
+// systems no output-stream wrapper is needed, so this only turns
+// color off when the writer is not a terminal.
+func (l *intLogger) setColorization(opts *LoggerOptions) {
+	switch opts.Color {
+	case ColorOff:
+		fallthrough
+	case ForceColor:
+		return
+	case AutoColor:
+		fi := l.checkWriterIsFile()
+		isUnixTerm := isatty.IsTerminal(fi.Fd())
+		isCygwinTerm := isatty.IsCygwinTerminal(fi.Fd())
+		isTerm := isUnixTerm || isCygwinTerm
+		if !isTerm {
+			l.headerColor = ColorOff
+			l.writer.color = ColorOff
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_windows.go b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go
new file mode 100644
index 00000000000..26f8cef8d12
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go
@@ -0,0 +1,38 @@
+//go:build windows
+// +build windows
+
+package hclog
+
+import (
+	"os"
+
+	colorable "github.com/mattn/go-colorable"
+	"github.com/mattn/go-isatty"
+)
+
+// setColorization will mutate the values of this logger
+// to appropriately configure colorization options. It provides
+// a wrapper to the output stream on Windows systems.
+func (l *intLogger) setColorization(opts *LoggerOptions) {
+	switch opts.Color {
+	case ColorOff:
+		return
+	case ForceColor:
+		fi := l.checkWriterIsFile()
+		l.writer.w = colorable.NewColorable(fi)
+	case AutoColor:
+		fi := l.checkWriterIsFile()
+		isUnixTerm := isatty.IsTerminal(os.Stdout.Fd())
+		isCygwinTerm := isatty.IsCygwinTerminal(os.Stdout.Fd())
+		isTerm := isUnixTerm || isCygwinTerm
+		if !isTerm {
+			l.writer.color = ColorOff
+			l.headerColor = ColorOff
+			return
+		}
+
+		if l.headerColor == ColorOff {
+			l.writer.w = colorable.NewColorable(fi)
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/context.go b/vendor/github.com/hashicorp/go-hclog/context.go
new file mode 100644
index 00000000000..7815f501942
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/context.go
@@ -0,0 +1,38 @@
+package hclog
+
+import (
+	"context"
+)
+
+// WithContext inserts a logger into the context and is retrievable
+// with FromContext. The optional args can be set with the same syntax as
+// Logger.With to set fields on the inserted logger. This will not modify
+// the logger argument in-place.
+func WithContext(ctx context.Context, logger Logger, args ...interface{}) context.Context {
+	// While we could call logger.With even with zero args, we have this
+	// check to avoid unnecessary allocations around creating a copy of a
+	// logger.
+	if len(args) > 0 {
+		logger = logger.With(args...)
+	}
+
+	return context.WithValue(ctx, contextKey, logger)
+}
+
+// FromContext returns a logger from the context. This will return L()
+// (the default logger) if no logger is found in the context. Therefore,
+// this will never return a nil value.
+func FromContext(ctx context.Context) Logger {
+	logger, _ := ctx.Value(contextKey).(Logger)
+	if logger == nil {
+		return L()
+	}
+
+	return logger
+}
+
+// Unexported new type so that our context key never collides with another.
+type contextKeyType struct{}
+
+// contextKey is the key used for the context to store the logger.
+var contextKey = contextKeyType{}
diff --git a/vendor/github.com/hashicorp/go-hclog/exclude.go b/vendor/github.com/hashicorp/go-hclog/exclude.go
new file mode 100644
index 00000000000..cfd4307a803
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/exclude.go
@@ -0,0 +1,71 @@
+package hclog
+
+import (
+	"regexp"
+	"strings"
+)
+
+// ExcludeByMessage provides a simple way to build a list of log messages that
+// can be queried and matched. This is meant to be used with the Exclude
+// option on Options to suppress log messages. This does not hold any mutexes
+// within itself, so normal usage is to Add entries at setup and none once
+// Exclude is in use. Exclude is called with a mutex held within the Logger,
+// so it does not need a mutex of its own. Example usage:
+//
+//	f := new(ExcludeByMessage)
+//	f.Add("Noisy log message text")
+//	appLogger.Exclude = f.Exclude
+type ExcludeByMessage struct {
+	messages map[string]struct{}
+}
+
+// Add a message to be filtered. Do not call this once Exclude is in use,
+// due to concurrency issues.
+func (f *ExcludeByMessage) Add(msg string) {
+	if f.messages == nil {
+		f.messages = make(map[string]struct{})
+	}
+
+	f.messages[msg] = struct{}{}
+}
+
+// Exclude returns true if the given message should be excluded.
+func (f *ExcludeByMessage) Exclude(level Level, msg string, args ...interface{}) bool {
+	_, ok := f.messages[msg]
+	return ok
+}
+
+// ExcludeByPrefix is a simple type to match a message string that has a common prefix.
+type ExcludeByPrefix string
+
+// Exclude matches any message that starts with the prefix.
+func (p ExcludeByPrefix) Exclude(level Level, msg string, args ...interface{}) bool {
+	return strings.HasPrefix(msg, string(p))
+}
+
+// ExcludeByRegexp takes a regexp and uses it to match a log message string. If it matches
+// the log entry is excluded.
+type ExcludeByRegexp struct {
+	Regexp *regexp.Regexp
+}
+
+// Exclude the log message if the message string matches the regexp
+func (e ExcludeByRegexp) Exclude(level Level, msg string, args ...interface{}) bool {
+	return e.Regexp.MatchString(msg)
+}
+
+// ExcludeFuncs is a slice of functions that will be called to see if a log entry
+// should be filtered or not. It stops calling functions once at least one returns
+// true.
+type ExcludeFuncs []func(level Level, msg string, args ...interface{}) bool
+
+// Exclude calls each function until one of them returns true
+func (ff ExcludeFuncs) Exclude(level Level, msg string, args ...interface{}) bool {
+	for _, f := range ff {
+		if f(level, msg, args...) {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go
new file mode 100644
index 00000000000..48ff1f3a4e9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/global.go
@@ -0,0 +1,64 @@
+package hclog
+
+import (
+	"sync"
+	"time"
+)
+
+var (
+	protect sync.Once
+	def     Logger
+
+	// DefaultOptions is used to create the Default logger. These are read
+	// only when the Default logger is created, so set them as soon as the
+	// process starts.
+	DefaultOptions = &LoggerOptions{
+		Level:  DefaultLevel,
+		Output: DefaultOutput,
+		TimeFn: time.Now,
+	}
+)
+
+// Default returns a globally held logger. This can be a good starting
+// place, and then you can use .With() and .Named() to create sub-loggers
+// to be used in more specific contexts.
+// The value of the Default logger can be set via SetDefault() or by
+// changing the options in DefaultOptions.
+//
+// This method is goroutine safe, returning a global from memory, but
+// care should be used if SetDefault() is called at random times
+// in the program as that may result in race conditions and an unexpected
+// Logger being returned.
+func Default() Logger {
+	protect.Do(func() {
+		// If SetDefault was used before Default() was called, we need to
+		// detect that here.
+		if def == nil {
+			def = New(DefaultOptions)
+		}
+	})
+
+	return def
+}
+
+// L is a short alias for Default().
+func L() Logger {
+	return Default()
+}
+
+// SetDefault changes the logger to be returned by Default() and L()
+// to the one given. This allows packages to use the default logger
+// and have higher level packages change it to match the execution
+// environment. It returns any old default if there is one.
+//
+// NOTE: This is expected to be called early in the program to set up
+// a default logger. As such, it does not attempt to make itself
+// not racy with regard to the value of the default logger. Ergo
+// if it is called in goroutines, you may experience race conditions
+// with other goroutines retrieving the default logger. Basically,
+// don't do that.
+func SetDefault(log Logger) Logger {
+	old := def
+	def = log
+	return old
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go
new file mode 100644
index 00000000000..ff42f1bfc1d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go
@@ -0,0 +1,204 @@
+package hclog
+
+import (
+	"io"
+	"log"
+	"sync"
+	"sync/atomic"
+)
+
+var _ Logger = &interceptLogger{}
+
+type interceptLogger struct {
+	Logger
+
+	mu        *sync.Mutex
+	sinkCount *int32
+	Sinks     map[SinkAdapter]struct{}
+}
+
+func NewInterceptLogger(opts *LoggerOptions) InterceptLogger {
+	l := newLogger(opts)
+	if l.callerOffset > 0 {
+		// extra frames for interceptLogger.{Warn,Info,Log,etc...}, and interceptLogger.log
+		l.callerOffset += 2
+	}
+	intercept := &interceptLogger{
+		Logger:    l,
+		mu:        new(sync.Mutex),
+		sinkCount: new(int32),
+		Sinks:     make(map[SinkAdapter]struct{}),
+	}
+
+	atomic.StoreInt32(intercept.sinkCount, 0)
+
+	return intercept
+}
+
+func (i *interceptLogger) Log(level Level, msg string, args ...interface{}) {
+	i.log(level, msg, args...)
+}
+
+// log is used to make the caller stack frame lookup consistent. If Warn, Info,
+// etc. all called Log then direct calls to Log would have a different stack
+// frame depth. By having all the methods call the same helper we ensure the
+// stack frame depth is the same.
+func (i *interceptLogger) log(level Level, msg string, args ...interface{}) {
+	i.Logger.Log(level, msg, args...)
+	if atomic.LoadInt32(i.sinkCount) == 0 {
+		return
+	}
+
+	i.mu.Lock()
+	defer i.mu.Unlock()
+	for s := range i.Sinks {
+		s.Accept(i.Name(), level, msg, i.retrieveImplied(args...)...)
+	}
+}
+
+// Emit the message and args at TRACE level to log and sinks
+func (i *interceptLogger) Trace(msg string, args ...interface{}) {
+	i.log(Trace, msg, args...)
+}
+
+// Emit the message and args at DEBUG level to log and sinks
+func (i *interceptLogger) Debug(msg string, args ...interface{}) {
+	i.log(Debug, msg, args...)
+}
+
+// Emit the message and args at INFO level to log and sinks
+func (i *interceptLogger) Info(msg string, args ...interface{}) {
+	i.log(Info, msg, args...)
+}
+
+// Emit the message and args at WARN level to log and sinks
+func (i *interceptLogger) Warn(msg string, args ...interface{}) {
+	i.log(Warn, msg, args...)
+}
+
+// Emit the message and args at ERROR level to log and sinks
+func (i *interceptLogger) Error(msg string, args ...interface{}) {
+	i.log(Error, msg, args...)
+}
+
+func (i *interceptLogger) retrieveImplied(args ...interface{}) []interface{} {
+	top := i.Logger.ImpliedArgs()
+
+	cp := make([]interface{}, len(top)+len(args))
+	copy(cp, top)
+	copy(cp[len(top):], args)
+
+	return cp
+}
+
+// Create a new sub-Logger that has a name descending from the current name.
+// This is used to create a subsystem specific Logger.
+// Registered sinks will subscribe to these messages as well.
+func (i *interceptLogger) Named(name string) Logger {
+	return i.NamedIntercept(name)
+}
+
+// Create a new sub-Logger with an explicit name. This ignores the current
+// name. This is used to create a standalone logger that doesn't fall
+// within the normal hierarchy. Registered sinks will subscribe
+// to these messages as well.
+func (i *interceptLogger) ResetNamed(name string) Logger {
+	return i.ResetNamedIntercept(name)
+}
+
+// Create a new sub-Logger that has a name descending from the current name.
+// This is used to create a subsystem specific Logger.
+// Registered sinks will subscribe to these messages as well.
+func (i *interceptLogger) NamedIntercept(name string) InterceptLogger {
+	var sub interceptLogger
+
+	sub = *i
+	sub.Logger = i.Logger.Named(name)
+	return &sub
+}
+
+// Create a new sub-Logger with an explicit name. This ignores the current
+// name. This is used to create a standalone logger that doesn't fall
+// within the normal hierarchy. Registered sinks will subscribe
+// to these messages as well.
+func (i *interceptLogger) ResetNamedIntercept(name string) InterceptLogger {
+	var sub interceptLogger
+
+	sub = *i
+	sub.Logger = i.Logger.ResetNamed(name)
+	return &sub
+}
+
+// Return a sub-Logger for which every emitted log message will contain
+// the given key/value pairs. This is used to create a context specific
+// Logger.
+func (i *interceptLogger) With(args ...interface{}) Logger {
+	var sub interceptLogger
+
+	sub = *i
+
+	sub.Logger = i.Logger.With(args...)
+
+	return &sub
+}
+
+// RegisterSink attaches a SinkAdapter to the interceptLogger's sinks.
+func (i *interceptLogger) RegisterSink(sink SinkAdapter) {
+	i.mu.Lock()
+	defer i.mu.Unlock()
+
+	i.Sinks[sink] = struct{}{}
+
+	atomic.AddInt32(i.sinkCount, 1)
+}
+
+// DeregisterSink removes a SinkAdapter from the interceptLogger's sinks.
+func (i *interceptLogger) DeregisterSink(sink SinkAdapter) { + i.mu.Lock() + defer i.mu.Unlock() + + delete(i.Sinks, sink) + + atomic.AddInt32(i.sinkCount, -1) +} + +func (i *interceptLogger) StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger { + return i.StandardLogger(opts) +} + +func (i *interceptLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + if opts == nil { + opts = &StandardLoggerOptions{} + } + + return log.New(i.StandardWriter(opts), "", 0) +} + +func (i *interceptLogger) StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer { + return i.StandardWriter(opts) +} + +func (i *interceptLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { + return &stdlogAdapter{ + log: i, + inferLevels: opts.InferLevels, + inferLevelsWithTimestamp: opts.InferLevelsWithTimestamp, + forceLevel: opts.ForceLevel, + } +} + +func (i *interceptLogger) ResetOutput(opts *LoggerOptions) error { + if or, ok := i.Logger.(OutputResettable); ok { + return or.ResetOutput(opts) + } else { + return nil + } +} + +func (i *interceptLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error { + if or, ok := i.Logger.(OutputResettable); ok { + return or.ResetOutputWithFlush(opts, flushable) + } else { + return nil + } +} diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go new file mode 100644 index 00000000000..e4cd8eddcd2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -0,0 +1,911 @@ +package hclog + +import ( + "bytes" + "encoding" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "os" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + "unicode" + "unicode/utf8" + + "github.com/fatih/color" +) + +// TimeFormat is the time format to use for plain (non-JSON) output. +// This is a version of RFC3339 that contains millisecond precision. +const TimeFormat = "2006-01-02T15:04:05.000Z0700" + +// TimeFormatJSON is the time format to use for JSON output. +// This is a version of RFC3339 that contains microsecond precision. +const TimeFormatJSON = "2006-01-02T15:04:05.000000Z07:00" + +// errJsonUnsupportedTypeMsg is included in log json entries, if an arg cannot be serialized to json +const errJsonUnsupportedTypeMsg = "logging contained values that don't serialize to json" + +var ( + _levelToBracket = map[Level]string{ + Debug: "[DEBUG]", + Trace: "[TRACE]", + Info: "[INFO] ", + Warn: "[WARN] ", + Error: "[ERROR]", + } + + _levelToColor = map[Level]*color.Color{ + Debug: color.New(color.FgHiWhite), + Trace: color.New(color.FgHiGreen), + Info: color.New(color.FgHiBlue), + Warn: color.New(color.FgHiYellow), + Error: color.New(color.FgHiRed), + } + + faintBoldColor = color.New(color.Faint, color.Bold) + faintColor = color.New(color.Faint) + faintMultiLinePrefix = faintColor.Sprint(" | ") + faintFieldSeparator = faintColor.Sprint("=") + faintFieldSeparatorWithNewLine = faintColor.Sprint("=\n") +) + +// Make sure that intLogger is a Logger +var _ Logger = &intLogger{} + +// intLogger is an internal logger implementation. Internal in that it is +// defined entirely by this package. +type intLogger struct { + json bool + callerOffset int + name string + timeFormat string + timeFn TimeFunction + disableTime bool + + // This is an interface so that it's shared by any derived loggers, since + // those derived loggers share the bufio.Writer as well. 
+	mutex  Locker
+	writer *writer
+	level  *int32
+
+	headerColor ColorOption
+	fieldColor  ColorOption
+
+	implied []interface{}
+
+	exclude func(level Level, msg string, args ...interface{}) bool
+
+	// create subloggers with their own level setting
+	independentLevels bool
+}
+
+// New returns a configured logger.
+func New(opts *LoggerOptions) Logger {
+	return newLogger(opts)
+}
+
+// NewSinkAdapter returns a SinkAdapter with configured settings
+// defined by LoggerOptions
+func NewSinkAdapter(opts *LoggerOptions) SinkAdapter {
+	l := newLogger(opts)
+	if l.callerOffset > 0 {
+		// extra frames for interceptLogger.{Warn,Info,Log,etc...}, and SinkAdapter.Accept
+		l.callerOffset += 2
+	}
+	return l
+}
+
+func newLogger(opts *LoggerOptions) *intLogger {
+	if opts == nil {
+		opts = &LoggerOptions{}
+	}
+
+	output := opts.Output
+	if output == nil {
+		output = DefaultOutput
+	}
+
+	level := opts.Level
+	if level == NoLevel {
+		level = DefaultLevel
+	}
+
+	mutex := opts.Mutex
+	if mutex == nil {
+		mutex = new(sync.Mutex)
+	}
+
+	var (
+		primaryColor ColorOption = ColorOff
+		headerColor  ColorOption = ColorOff
+		fieldColor   ColorOption = ColorOff
+	)
+	switch {
+	case opts.ColorHeaderOnly:
+		headerColor = opts.Color
+	case opts.ColorHeaderAndFields:
+		fieldColor = opts.Color
+		headerColor = opts.Color
+	default:
+		primaryColor = opts.Color
+	}
+
+	l := &intLogger{
+		json:              opts.JSONFormat,
+		name:              opts.Name,
+		timeFormat:        TimeFormat,
+		timeFn:            time.Now,
+		disableTime:       opts.DisableTime,
+		mutex:             mutex,
+		writer:            newWriter(output, primaryColor),
+		level:             new(int32),
+		exclude:           opts.Exclude,
+		independentLevels: opts.IndependentLevels,
+		headerColor:       headerColor,
+		fieldColor:        fieldColor,
+	}
+	if opts.IncludeLocation {
+		l.callerOffset = offsetIntLogger + opts.AdditionalLocationOffset
+	}
+
+	if l.json {
+		l.timeFormat = TimeFormatJSON
+	}
+	if opts.TimeFn != nil {
+		l.timeFn = opts.TimeFn
+	}
+	if opts.TimeFormat != "" {
+		l.timeFormat = opts.TimeFormat
+	}
+
+	l.setColorization(opts)
+
+	atomic.StoreInt32(l.level, int32(level))
+
+	return l
+}
+
+// offsetIntLogger is the stack frame offset in the call stack for the caller to
+// one of the Warn, Info, Log, etc methods.
+const offsetIntLogger = 3
+
+// Log a message and a set of key/value pairs if the given level is at
+// or more severe than the threshold configured in the Logger.
+func (l *intLogger) log(name string, level Level, msg string, args ...interface{}) {
+	if level < Level(atomic.LoadInt32(l.level)) {
+		return
+	}
+
+	t := l.timeFn()
+
+	l.mutex.Lock()
+	defer l.mutex.Unlock()
+
+	if l.exclude != nil && l.exclude(level, msg, args...) {
+		return
+	}
+
+	if l.json {
+		l.logJSON(t, name, level, msg, args...)
+	} else {
+		l.logPlain(t, name, level, msg, args...)
+	}
+
+	l.writer.Flush(level)
+}
+
+// Clean up a path by returning the last 2 segments of the path only.
+func trimCallerPath(path string) string {
+	// lovely borrowed from zap
+	// nb. To make sure we trim the path correctly on Windows too, we
+	// counter-intuitively need to use '/' and *not* os.PathSeparator here,
+	// because the path given originates from Go stdlib, specifically
+	// runtime.Caller() which (as of Mar/17) returns forward slashes even on
+	// Windows.
+	//
+	// See https://github.com/golang/go/issues/3335
+	// and https://github.com/golang/go/issues/18151
+	//
+	// for discussion on the issue on Go side.
+
+	// Find the last separator.
+ idx := strings.LastIndexByte(path, '/') + if idx == -1 { + return path + } + + // Find the penultimate separator. + idx = strings.LastIndexByte(path[:idx], '/') + if idx == -1 { + return path + } + + return path[idx+1:] +} + +// isNormal indicates if the rune is one allowed to exist as an unquoted +// string value. This is a subset of ASCII, `-` through `~`. +func isNormal(r rune) bool { + return 0x2D <= r && r <= 0x7E // - through ~ +} + +// needsQuoting returns false if all the runes in string are normal, according +// to isNormal +func needsQuoting(str string) bool { + for _, r := range str { + if !isNormal(r) { + return true + } + } + + return false +} + +// logPlain is the non-JSON logging format function which writes directly +// to the underlying writer the logger was initialized with. +// +// If the logger was initialized with a color function, it also handles +// applying the color to the log message. +// +// Color Options +// 1. No color. +// 2. Color the whole log line, based on the level. +// 3. Color only the header (level) part of the log line. +// 4. Color both the header and fields of the log line. +// +func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) { + + if !l.disableTime { + l.writer.WriteString(t.Format(l.timeFormat)) + l.writer.WriteByte(' ') + } + + s, ok := _levelToBracket[level] + if ok { + if l.headerColor != ColorOff { + color := _levelToColor[level] + color.Fprint(l.writer, s) + } else { + l.writer.WriteString(s) + } + } else { + l.writer.WriteString("[?????]") + } + + if l.callerOffset > 0 { + if _, file, line, ok := runtime.Caller(l.callerOffset); ok { + l.writer.WriteByte(' ') + l.writer.WriteString(trimCallerPath(file)) + l.writer.WriteByte(':') + l.writer.WriteString(strconv.Itoa(line)) + l.writer.WriteByte(':') + } + } + + l.writer.WriteByte(' ') + + if name != "" { + l.writer.WriteString(name) + if msg != "" { + l.writer.WriteString(": ") + l.writer.WriteString(msg) + } + } else if msg != "" { + l.writer.WriteString(msg) + } + + args = append(l.implied, args...) + + var stacktrace CapturedStacktrace + + if len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + stacktrace = cs + } else { + extra := args[len(args)-1] + args = append(args[:len(args)-1], MissingKey, extra) + } + } + + l.writer.WriteByte(':') + + // Handle the field arguments, which come in pairs (key=val). + FOR: + for i := 0; i < len(args); i = i + 2 { + var ( + key string + val string + raw bool + ) + + // Convert the field value to a string. 
+ switch st := args[i+1].(type) { + case string: + val = st + if st == "" { + val = `""` + raw = true + } + case int: + val = strconv.FormatInt(int64(st), 10) + case int64: + val = strconv.FormatInt(int64(st), 10) + case int32: + val = strconv.FormatInt(int64(st), 10) + case int16: + val = strconv.FormatInt(int64(st), 10) + case int8: + val = strconv.FormatInt(int64(st), 10) + case uint: + val = strconv.FormatUint(uint64(st), 10) + case uint64: + val = strconv.FormatUint(uint64(st), 10) + case uint32: + val = strconv.FormatUint(uint64(st), 10) + case uint16: + val = strconv.FormatUint(uint64(st), 10) + case uint8: + val = strconv.FormatUint(uint64(st), 10) + case Hex: + val = "0x" + strconv.FormatUint(uint64(st), 16) + case Octal: + val = "0" + strconv.FormatUint(uint64(st), 8) + case Binary: + val = "0b" + strconv.FormatUint(uint64(st), 2) + case CapturedStacktrace: + stacktrace = st + continue FOR + case Format: + val = fmt.Sprintf(st[0].(string), st[1:]...) + case Quote: + raw = true + val = strconv.Quote(string(st)) + default: + v := reflect.ValueOf(st) + if v.Kind() == reflect.Slice { + val = l.renderSlice(v) + raw = true + } else { + val = fmt.Sprintf("%v", st) + } + } + + // Convert the field key to a string. + switch st := args[i].(type) { + case string: + key = st + default: + key = fmt.Sprintf("%s", st) + } + + // Optionally apply the ANSI "faint" and "bold" + // SGR values to the key. + if l.fieldColor != ColorOff { + key = faintBoldColor.Sprint(key) + } + + // Values may contain multiple lines, and that format + // is preserved, with each line prefixed with a " | " + // to show it's part of a collection of lines. + // + // Values may also need quoting, if not all the runes + // in the value string are "normal", like if they + // contain ANSI escape sequences. 
+ if strings.Contains(val, "\n") { + l.writer.WriteString("\n ") + l.writer.WriteString(key) + if l.fieldColor != ColorOff { + l.writer.WriteString(faintFieldSeparatorWithNewLine) + writeIndent(l.writer, val, faintMultiLinePrefix) + } else { + l.writer.WriteString("=\n") + writeIndent(l.writer, val, " | ") + } + l.writer.WriteString(" ") + } else if !raw && needsQuoting(val) { + l.writer.WriteByte(' ') + l.writer.WriteString(key) + if l.fieldColor != ColorOff { + l.writer.WriteString(faintFieldSeparator) + } else { + l.writer.WriteByte('=') + } + l.writer.WriteByte('"') + writeEscapedForOutput(l.writer, val, true) + l.writer.WriteByte('"') + } else { + l.writer.WriteByte(' ') + l.writer.WriteString(key) + if l.fieldColor != ColorOff { + l.writer.WriteString(faintFieldSeparator) + } else { + l.writer.WriteByte('=') + } + l.writer.WriteString(val) + } + } + } + + l.writer.WriteString("\n") + + if stacktrace != "" { + l.writer.WriteString(string(stacktrace)) + l.writer.WriteString("\n") + } +} + +func writeIndent(w *writer, str string, indent string) { + for { + nl := strings.IndexByte(str, "\n"[0]) + if nl == -1 { + if str != "" { + w.WriteString(indent) + writeEscapedForOutput(w, str, false) + w.WriteString("\n") + } + return + } + + w.WriteString(indent) + writeEscapedForOutput(w, str[:nl], false) + w.WriteString("\n") + str = str[nl+1:] + } +} + +func needsEscaping(str string) bool { + for _, b := range str { + if !unicode.IsPrint(b) || b == '"' { + return true + } + } + + return false +} + +const ( + lowerhex = "0123456789abcdef" +) + +var bufPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +func writeEscapedForOutput(w io.Writer, str string, escapeQuotes bool) { + if !needsEscaping(str) { + w.Write([]byte(str)) + return + } + + bb := bufPool.Get().(*bytes.Buffer) + bb.Reset() + + defer bufPool.Put(bb) + + for _, r := range str { + if escapeQuotes && r == '"' { + bb.WriteString(`\"`) + } else if unicode.IsPrint(r) { + bb.WriteRune(r) + } else { + switch r { + case '\a': + bb.WriteString(`\a`) + case '\b': + bb.WriteString(`\b`) + case '\f': + bb.WriteString(`\f`) + case '\n': + bb.WriteString(`\n`) + case '\r': + bb.WriteString(`\r`) + case '\t': + bb.WriteString(`\t`) + case '\v': + bb.WriteString(`\v`) + default: + switch { + case r < ' ': + bb.WriteString(`\x`) + bb.WriteByte(lowerhex[byte(r)>>4]) + bb.WriteByte(lowerhex[byte(r)&0xF]) + case !utf8.ValidRune(r): + r = 0xFFFD + fallthrough + case r < 0x10000: + bb.WriteString(`\u`) + for s := 12; s >= 0; s -= 4 { + bb.WriteByte(lowerhex[r>>uint(s)&0xF]) + } + default: + bb.WriteString(`\U`) + for s := 28; s >= 0; s -= 4 { + bb.WriteByte(lowerhex[r>>uint(s)&0xF]) + } + } + } + } + } + + w.Write(bb.Bytes()) +} + +func (l *intLogger) renderSlice(v reflect.Value) string { + var buf bytes.Buffer + + buf.WriteRune('[') + + for i := 0; i < v.Len(); i++ { + if i > 0 { + buf.WriteString(", ") + } + + sv := v.Index(i) + + var val string + + switch sv.Kind() { + case reflect.String: + val = strconv.Quote(sv.String()) + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + val = strconv.FormatInt(sv.Int(), 10) + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + val = strconv.FormatUint(sv.Uint(), 10) + default: + val = fmt.Sprintf("%v", sv.Interface()) + if strings.ContainsAny(val, " \t\n\r") { + val = strconv.Quote(val) + } + } + + buf.WriteString(val) + } + + buf.WriteRune(']') + + return buf.String() +} + +// JSON logging function +func (l *intLogger) logJSON(t time.Time, name 
string, level Level, msg string, args ...interface{}) { + vals := l.jsonMapEntry(t, name, level, msg) + args = append(l.implied, args...) + + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + vals["stacktrace"] = cs + } else { + extra := args[len(args)-1] + args = append(args[:len(args)-1], MissingKey, extra) + } + } + + for i := 0; i < len(args); i = i + 2 { + val := args[i+1] + switch sv := val.(type) { + case error: + // Check if val is of type error. If error type doesn't + // implement json.Marshaler or encoding.TextMarshaler + // then set val to err.Error() so that it gets marshaled + switch sv.(type) { + case json.Marshaler, encoding.TextMarshaler: + default: + val = sv.Error() + } + case Format: + val = fmt.Sprintf(sv[0].(string), sv[1:]...) + } + + var key string + + switch st := args[i].(type) { + case string: + key = st + default: + key = fmt.Sprintf("%s", st) + } + vals[key] = val + } + } + + err := json.NewEncoder(l.writer).Encode(vals) + if err != nil { + if _, ok := err.(*json.UnsupportedTypeError); ok { + plainVal := l.jsonMapEntry(t, name, level, msg) + plainVal["@warn"] = errJsonUnsupportedTypeMsg + + json.NewEncoder(l.writer).Encode(plainVal) + } + } +} + +func (l intLogger) jsonMapEntry(t time.Time, name string, level Level, msg string) map[string]interface{} { + vals := map[string]interface{}{ + "@message": msg, + } + if !l.disableTime { + vals["@timestamp"] = t.Format(l.timeFormat) + } + + var levelStr string + switch level { + case Error: + levelStr = "error" + case Warn: + levelStr = "warn" + case Info: + levelStr = "info" + case Debug: + levelStr = "debug" + case Trace: + levelStr = "trace" + default: + levelStr = "all" + } + + vals["@level"] = levelStr + + if name != "" { + vals["@module"] = name + } + + if l.callerOffset > 0 { + if _, file, line, ok := runtime.Caller(l.callerOffset + 1); ok { + vals["@caller"] = fmt.Sprintf("%s:%d", file, line) + } + } + return vals +} + +// Emit the message and args at the provided level +func (l *intLogger) Log(level Level, msg string, args ...interface{}) { + l.log(l.Name(), level, msg, args...) +} + +// Emit the message and args at DEBUG level +func (l *intLogger) Debug(msg string, args ...interface{}) { + l.log(l.Name(), Debug, msg, args...) +} + +// Emit the message and args at TRACE level +func (l *intLogger) Trace(msg string, args ...interface{}) { + l.log(l.Name(), Trace, msg, args...) +} + +// Emit the message and args at INFO level +func (l *intLogger) Info(msg string, args ...interface{}) { + l.log(l.Name(), Info, msg, args...) +} + +// Emit the message and args at WARN level +func (l *intLogger) Warn(msg string, args ...interface{}) { + l.log(l.Name(), Warn, msg, args...) +} + +// Emit the message and args at ERROR level +func (l *intLogger) Error(msg string, args ...interface{}) { + l.log(l.Name(), Error, msg, args...) 
+}
+
+// Indicate that the logger would emit TRACE level logs
+func (l *intLogger) IsTrace() bool {
+	return Level(atomic.LoadInt32(l.level)) == Trace
+}
+
+// Indicate that the logger would emit DEBUG level logs
+func (l *intLogger) IsDebug() bool {
+	return Level(atomic.LoadInt32(l.level)) <= Debug
+}
+
+// Indicate that the logger would emit INFO level logs
+func (l *intLogger) IsInfo() bool {
+	return Level(atomic.LoadInt32(l.level)) <= Info
+}
+
+// Indicate that the logger would emit WARN level logs
+func (l *intLogger) IsWarn() bool {
+	return Level(atomic.LoadInt32(l.level)) <= Warn
+}
+
+// Indicate that the logger would emit ERROR level logs
+func (l *intLogger) IsError() bool {
+	return Level(atomic.LoadInt32(l.level)) <= Error
+}
+
+const MissingKey = "EXTRA_VALUE_AT_END"
+
+// Return a sub-Logger for which every emitted log message will contain
+// the given key/value pairs. This is used to create a context specific
+// Logger.
+func (l *intLogger) With(args ...interface{}) Logger {
+	var extra interface{}
+
+	if len(args)%2 != 0 {
+		extra = args[len(args)-1]
+		args = args[:len(args)-1]
+	}
+
+	sl := l.copy()
+
+	result := make(map[string]interface{}, len(l.implied)+len(args))
+	keys := make([]string, 0, len(l.implied)+len(args))
+
+	// Read existing args, store map and key for consistent sorting
+	for i := 0; i < len(l.implied); i += 2 {
+		key := l.implied[i].(string)
+		keys = append(keys, key)
+		result[key] = l.implied[i+1]
+	}
+	// Read new args, store map and key for consistent sorting
+	for i := 0; i < len(args); i += 2 {
+		key := args[i].(string)
+		_, exists := result[key]
+		if !exists {
+			keys = append(keys, key)
+		}
+		result[key] = args[i+1]
+	}
+
+	// Sort keys to be consistent
+	sort.Strings(keys)
+
+	sl.implied = make([]interface{}, 0, len(l.implied)+len(args))
+	for _, k := range keys {
+		sl.implied = append(sl.implied, k)
+		sl.implied = append(sl.implied, result[k])
+	}
+
+	if extra != nil {
+		sl.implied = append(sl.implied, MissingKey, extra)
+	}
+
+	return sl
+}
+
+// Create a new sub-Logger that has a name descending from the current name.
+// This is used to create a subsystem specific Logger.
+func (l *intLogger) Named(name string) Logger {
+	sl := l.copy()
+
+	if sl.name != "" {
+		sl.name = sl.name + "." + name
+	} else {
+		sl.name = name
+	}
+
+	return sl
+}
+
+// Create a new sub-Logger with an explicit name. This ignores the current
+// name. This is used to create a standalone logger that doesn't fall
+// within the normal hierarchy.
+func (l *intLogger) ResetNamed(name string) Logger {
+	sl := l.copy()
+
+	sl.name = name
+
+	return sl
+}
+
+func (l *intLogger) ResetOutput(opts *LoggerOptions) error {
+	if opts.Output == nil {
+		return errors.New("given output is nil")
+	}
+
+	l.mutex.Lock()
+	defer l.mutex.Unlock()
+
+	return l.resetOutput(opts)
+}
+
+func (l *intLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error {
+	if opts.Output == nil {
+		return errors.New("given output is nil")
+	}
+	if flushable == nil {
+		return errors.New("flushable is nil")
+	}
+
+	l.mutex.Lock()
+	defer l.mutex.Unlock()
+
+	if err := flushable.Flush(); err != nil {
+		return err
+	}
+
+	return l.resetOutput(opts)
+}
+
+func (l *intLogger) resetOutput(opts *LoggerOptions) error {
+	l.writer = newWriter(opts.Output, opts.Color)
+	l.setColorization(opts)
+	return nil
+}
+
+// Update the logging level on-the-fly. This will affect all subloggers as
+// well.
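+//
+// A minimal sketch (the logger names are illustrative; New and Named are the
+// exported hclog API shown elsewhere in this patch):
+//
+//	root := hclog.New(&hclog.LoggerOptions{Level: hclog.Info})
+//	sub := root.Named("db")
+//	root.SetLevel(hclog.Debug)
+//	// Without IndependentLevels, sub now emits DEBUG entries as well.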
+func (l *intLogger) SetLevel(level Level) {
+	atomic.StoreInt32(l.level, int32(level))
+}
+
+// Create a *log.Logger that will send its data through this Logger. This
+// allows packages that expect to be using the standard library log to actually
+// use this logger.
+func (l *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
+	if opts == nil {
+		opts = &StandardLoggerOptions{}
+	}
+
+	return log.New(l.StandardWriter(opts), "", 0)
+}
+
+func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
+	newLog := *l
+	if l.callerOffset > 0 {
+		// the stack is
+		// logger.printf() -> l.Output() -> l.out.writer(hclog:stdlogAdapter.write) -> hclog:stdlogAdapter.dispatch()
+		// So plus 4.
+		newLog.callerOffset = l.callerOffset + 4
+	}
+	return &stdlogAdapter{
+		log:                      &newLog,
+		inferLevels:              opts.InferLevels,
+		inferLevelsWithTimestamp: opts.InferLevelsWithTimestamp,
+		forceLevel:               opts.ForceLevel,
+	}
+}
+
+// checkWriterIsFile checks if the underlying io.Writer is a file, and
+// panics if not. For use by colorization.
+func (l *intLogger) checkWriterIsFile() *os.File {
+	fi, ok := l.writer.w.(*os.File)
+	if !ok {
+		panic("Cannot enable coloring of non-file Writers")
+	}
+	return fi
+}
+
+// Accept implements the SinkAdapter interface
+func (i *intLogger) Accept(name string, level Level, msg string, args ...interface{}) {
+	i.log(name, level, msg, args...)
+}
+
+// ImpliedArgs returns the loggers implied args
+func (i *intLogger) ImpliedArgs() []interface{} {
+	return i.implied
+}
+
+// Name returns the loggers name
+func (i *intLogger) Name() string {
+	return i.name
+}
+
+// copy returns a shallow copy of the intLogger, replacing the level pointer
+// when necessary
+func (l *intLogger) copy() *intLogger {
+	sl := *l
+
+	if l.independentLevels {
+		sl.level = new(int32)
+		*sl.level = *l.level
+	}
+
+	return &sl
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go
new file mode 100644
index 00000000000..50dee82203f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/logger.go
@@ -0,0 +1,373 @@
+package hclog
+
+import (
+	"io"
+	"log"
+	"os"
+	"strings"
+	"time"
+)
+
+var (
+	// DefaultOutput is used as the default log output.
+	DefaultOutput io.Writer = os.Stderr
+
+	// DefaultLevel is used as the default log level.
+	DefaultLevel = Info
+)
+
+// Level represents a log level.
+type Level int32
+
+const (
+	// NoLevel is a special level used to indicate that no level has been
+	// set and allow for a default to be used.
+	NoLevel Level = 0
+
+	// Trace is the most verbose level. Intended to be used for the tracing
+	// of actions in code, such as function enters/exits, etc.
+	Trace Level = 1
+
+	// Debug information for programmer low-level analysis.
+	Debug Level = 2
+
+	// Info information about steady state operations.
+	Info Level = 3
+
+	// Warn information about rare but handled events.
+	Warn Level = 4
+
+	// Error information about unrecoverable events.
+	Error Level = 5
+
+	// Off disables all logging output.
+	Off Level = 6
+)
+
+// Format is a simple convenience type for when formatting is required. When
+// processing a value of this type, the logger automatically treats the first
+// argument as a Printf formatting string and passes the rest as the values
+// to be formatted. For example: L.Info(Fmt{"%d beans/day", beans}).
+type Format []interface{}
+
+// Fmt returns a Format type. This is a convenience function for creating a Format
+// type.
+func Fmt(str string, args ...interface{}) Format { + return append(Format{str}, args...) +} + +// A simple shortcut to format numbers in hex when displayed with the normal +// text output. For example: L.Info("header value", Hex(17)) +type Hex int + +// A simple shortcut to format numbers in octal when displayed with the normal +// text output. For example: L.Info("perms", Octal(17)) +type Octal int + +// A simple shortcut to format numbers in binary when displayed with the normal +// text output. For example: L.Info("bits", Binary(17)) +type Binary int + +// A simple shortcut to format strings with Go quoting. Control and +// non-printable characters will be escaped with their backslash equivalents in +// output. Intended for untrusted or multiline strings which should be logged +// as concisely as possible. +type Quote string + +// ColorOption expresses how the output should be colored, if at all. +type ColorOption uint8 + +const ( + // ColorOff is the default coloration, and does not + // inject color codes into the io.Writer. + ColorOff ColorOption = iota + // AutoColor checks if the io.Writer is a tty, + // and if so enables coloring. + AutoColor + // ForceColor will enable coloring, regardless of whether + // the io.Writer is a tty or not. + ForceColor +) + +// LevelFromString returns a Level type for the named log level, or "NoLevel" if +// the level string is invalid. This facilitates setting the log level via +// config or environment variable by name in a predictable way. +func LevelFromString(levelStr string) Level { + // We don't care about case. Accept both "INFO" and "info". + levelStr = strings.ToLower(strings.TrimSpace(levelStr)) + switch levelStr { + case "trace": + return Trace + case "debug": + return Debug + case "info": + return Info + case "warn": + return Warn + case "error": + return Error + case "off": + return Off + default: + return NoLevel + } +} + +func (l Level) String() string { + switch l { + case Trace: + return "trace" + case Debug: + return "debug" + case Info: + return "info" + case Warn: + return "warn" + case Error: + return "error" + case NoLevel: + return "none" + case Off: + return "off" + default: + return "unknown" + } +} + +// Logger describes the interface that must be implemented by all loggers. +type Logger interface { + // Args are alternating key, val pairs + // keys must be strings + // vals can be any type, but display is implementation specific + // Emit a message and key/value pairs at a provided log level + Log(level Level, msg string, args ...interface{}) + + // Emit a message and key/value pairs at the TRACE level + Trace(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the DEBUG level + Debug(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the INFO level + Info(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the WARN level + Warn(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the ERROR level + Error(msg string, args ...interface{}) + + // Indicate if TRACE logs would be emitted. This and the other Is* guards + // are used to elide expensive logging code based on the current level. + IsTrace() bool + + // Indicate if DEBUG logs would be emitted. This and the other Is* guards + IsDebug() bool + + // Indicate if INFO logs would be emitted. This and the other Is* guards + IsInfo() bool + + // Indicate if WARN logs would be emitted. 
+	// This and the other Is* guards
+	IsWarn() bool
+
+	// Indicate if ERROR logs would be emitted. This and the other Is* guards
+	IsError() bool
+
+	// ImpliedArgs returns With key/value pairs
+	ImpliedArgs() []interface{}
+
+	// Creates a sublogger that will always have the given key/value pairs
+	With(args ...interface{}) Logger
+
+	// Returns the Name of the logger
+	Name() string
+
+	// Create a logger that will prepend the name string on the front of all messages.
+	// If the logger already has a name, the new value will be appended to the current
+	// name. That way, a major subsystem can use this to decorate all its own logs
+	// without losing context.
+	Named(name string) Logger
+
+	// Create a logger that will prepend the name string on the front of all messages.
+	// This sets the name of the logger to the value directly, unlike Named which honors
+	// the current name as well.
+	ResetNamed(name string) Logger
+
+	// Updates the level. This should affect all related loggers as well,
+	// unless they were created with IndependentLevels. If an
+	// implementation cannot update the level on the fly, it should no-op.
+	SetLevel(level Level)
+
+	// Return a value that conforms to the stdlib log.Logger interface
+	StandardLogger(opts *StandardLoggerOptions) *log.Logger
+
+	// Return a value that conforms to io.Writer, which can be passed into log.SetOutput()
+	StandardWriter(opts *StandardLoggerOptions) io.Writer
+}
+
+// StandardLoggerOptions can be used to configure a new standard logger.
+type StandardLoggerOptions struct {
+	// Indicate that some minimal parsing should be done on strings to try
+	// and detect their level and re-emit them.
+	// This supports level prefixes like [ERROR], [ERR], [TRACE], [WARN],
+	// [INFO], and [DEBUG], which are stripped off before being reapplied.
+	InferLevels bool
+
+	// Indicate that some minimal parsing should be done on strings to try
+	// and detect their level and re-emit them while ignoring possible
+	// timestamp values in the beginning of the string.
+	// This supports level prefixes like [ERROR], [ERR], [TRACE], [WARN],
+	// [INFO], and [DEBUG], which are stripped off before being reapplied.
+	// The timestamp detection may result in false positives and incomplete
+	// string outputs.
+	InferLevelsWithTimestamp bool
+
+	// ForceLevel is used to force all output from the standard logger to be at
+	// the specified level. Similar to InferLevels, this will strip any level
+	// prefix contained in the logged string before applying the forced level.
+	// If set, this overrides InferLevels.
+	ForceLevel Level
+}
+
+type TimeFunction = func() time.Time
+
+// LoggerOptions can be used to configure a new logger.
+type LoggerOptions struct {
+	// Name of the subsystem to prefix logs with
+	Name string
+
+	// The threshold for the logger. Anything less severe is suppressed
+	Level Level
+
+	// Where to write the logs to. Defaults to os.Stderr if nil
+	Output io.Writer
+
+	// An optional Locker in case Output is shared. This can be a sync.Mutex or
+	// a NoopLocker if the caller wants control over output, e.g. for batching
+	// log lines.
+	Mutex Locker
+
+	// Control if the output should be in JSON.
+	JSONFormat bool
+
+	// Include file and line information in each log line
+	IncludeLocation bool
+
+	// AdditionalLocationOffset is the number of additional stack levels to skip
+	// when finding the file and line information for the log line
+	AdditionalLocationOffset int
+
+	// The time format to use instead of the default
+	TimeFormat string
+
+	// A function which is called to get the time object that is formatted using `TimeFormat`
+	TimeFn TimeFunction
+
+	// Control whether or not to display the time at all. This is required
+	// because setting TimeFormat to empty assumes the default format.
+	DisableTime bool
+
+	// Color the output. On Windows, colored logs are only available for io.Writers that
+	// are concrete instances of *os.File.
+	Color ColorOption
+
+	// Only color the header, not the body. This can help with readability of long messages.
+	ColorHeaderOnly bool
+
+	// Color the header and message body fields. This can help with readability
+	// of long messages with multiple fields.
+	ColorHeaderAndFields bool
+
+	// A function which is called with the log information; if it returns true, the
+	// value should not be logged.
+	// This is useful for suppressing log messages from a subsystem that is too
+	// noisy, etc.
+	Exclude func(level Level, msg string, args ...interface{}) bool
+
+	// IndependentLevels causes subloggers to be created with an independent
+	// copy of this logger's level. This means that using SetLevel on this
+	// logger will not affect any subloggers, and SetLevel on any subloggers
+	// will not affect the parent or sibling loggers.
+	IndependentLevels bool
+}
+
+// InterceptLogger describes the interface for using a logger
+// that can register different output sinks.
+// This is useful for sending lower level log messages
+// to a different output while keeping the root logger
+// at a higher one.
+type InterceptLogger interface {
+	// Logger is the root logger for an InterceptLogger
+	Logger
+
+	// RegisterSink adds a SinkAdapter to the InterceptLogger
+	RegisterSink(sink SinkAdapter)
+
+	// DeregisterSink removes a SinkAdapter from the InterceptLogger
+	DeregisterSink(sink SinkAdapter)
+
+	// Create an InterceptLogger that will prepend the name string on the front of all messages.
+	// If the logger already has a name, the new value will be appended to the current
+	// name. That way, a major subsystem can use this to decorate all its own logs
+	// without losing context.
+	NamedIntercept(name string) InterceptLogger
+
+	// Create an InterceptLogger that will prepend the name string on the front of all messages.
+	// This sets the name of the logger to the value directly, unlike Named which honors
+	// the current name as well.
+	ResetNamedIntercept(name string) InterceptLogger
+
+	// Deprecated: use StandardLogger
+	StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger
+
+	// Deprecated: use StandardWriter
+	StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer
+}
+
+// SinkAdapter describes the interface that must be implemented
+// in order to Register a new sink to an InterceptLogger
+type SinkAdapter interface {
+	Accept(name string, level Level, msg string, args ...interface{})
+}
+
+// Flushable represents a method for flushing an output buffer. It can be used
+// when resetting the log to use a new output, in order to flush the writes to
+// the existing output beforehand.
+type Flushable interface { + Flush() error +} + +// OutputResettable provides ways to swap the output in use at runtime +type OutputResettable interface { + // ResetOutput swaps the current output writer with the one given in the + // opts. Color options given in opts will be used for the new output. + ResetOutput(opts *LoggerOptions) error + + // ResetOutputWithFlush swaps the current output writer with the one given + // in the opts, first calling Flush on the given Flushable. Color options + // given in opts will be used for the new output. + ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error +} + +// Locker is used for locking output. If not set when creating a logger, a +// sync.Mutex will be used internally. +type Locker interface { + // Lock is called when the output is going to be changed or written to + Lock() + + // Unlock is called when the operation that called Lock() completes + Unlock() +} + +// NoopLocker implements locker but does nothing. This is useful if the client +// wants tight control over locking, in order to provide grouping of log +// entries or other functionality. +type NoopLocker struct{} + +// Lock does nothing +func (n NoopLocker) Lock() {} + +// Unlock does nothing +func (n NoopLocker) Unlock() {} + +var _ Locker = (*NoopLocker)(nil) diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go new file mode 100644 index 00000000000..bc14f770807 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/nulllogger.go @@ -0,0 +1,58 @@ +package hclog + +import ( + "io" + "io/ioutil" + "log" +) + +// NewNullLogger instantiates a Logger for which all calls +// will succeed without doing anything. +// Useful for testing purposes. +func NewNullLogger() Logger { + return &nullLogger{} +} + +type nullLogger struct{} + +func (l *nullLogger) Log(level Level, msg string, args ...interface{}) {} + +func (l *nullLogger) Trace(msg string, args ...interface{}) {} + +func (l *nullLogger) Debug(msg string, args ...interface{}) {} + +func (l *nullLogger) Info(msg string, args ...interface{}) {} + +func (l *nullLogger) Warn(msg string, args ...interface{}) {} + +func (l *nullLogger) Error(msg string, args ...interface{}) {} + +func (l *nullLogger) IsTrace() bool { return false } + +func (l *nullLogger) IsDebug() bool { return false } + +func (l *nullLogger) IsInfo() bool { return false } + +func (l *nullLogger) IsWarn() bool { return false } + +func (l *nullLogger) IsError() bool { return false } + +func (l *nullLogger) ImpliedArgs() []interface{} { return []interface{}{} } + +func (l *nullLogger) With(args ...interface{}) Logger { return l } + +func (l *nullLogger) Name() string { return "" } + +func (l *nullLogger) Named(name string) Logger { return l } + +func (l *nullLogger) ResetNamed(name string) Logger { return l } + +func (l *nullLogger) SetLevel(level Level) {} + +func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + return log.New(l.StandardWriter(opts), "", log.LstdFlags) +} + +func (l *nullLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { + return ioutil.Discard +} diff --git a/vendor/github.com/hashicorp/go-hclog/stacktrace.go b/vendor/github.com/hashicorp/go-hclog/stacktrace.go new file mode 100644 index 00000000000..9b27bd3d3d9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/stacktrace.go @@ -0,0 +1,109 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package hclog
+
+import (
+	"bytes"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+var (
+	_stacktraceIgnorePrefixes = []string{
+		"runtime.goexit",
+		"runtime.main",
+	}
+	_stacktracePool = sync.Pool{
+		New: func() interface{} {
+			return newProgramCounters(64)
+		},
+	}
+)
+
+// CapturedStacktrace represents a stacktrace captured by a previous call
+// to log.Stacktrace. If passed to a logging function, the stacktrace
+// will be appended.
+type CapturedStacktrace string
+
+// Stacktrace captures a stacktrace of the current goroutine and returns
+// it to be passed to a logging function.
+func Stacktrace() CapturedStacktrace {
+	return CapturedStacktrace(takeStacktrace())
+}
+
+func takeStacktrace() string {
+	programCounters := _stacktracePool.Get().(*programCounters)
+	defer _stacktracePool.Put(programCounters)
+
+	var buffer bytes.Buffer
+
+	for {
+		// Skip the call to runtime.Callers and takeStacktrace so that the
+		// program counters start at the caller of takeStacktrace.
+		n := runtime.Callers(2, programCounters.pcs)
+		if n < cap(programCounters.pcs) {
+			programCounters.pcs = programCounters.pcs[:n]
+			break
+		}
+		// Don't put the too-short counter slice back into the pool; this lets
+		// the pool adjust if we consistently take deep stacktraces.
+		programCounters = newProgramCounters(len(programCounters.pcs) * 2)
+	}
+
+	i := 0
+	frames := runtime.CallersFrames(programCounters.pcs)
+	for frame, more := frames.Next(); more; frame, more = frames.Next() {
+		if shouldIgnoreStacktraceFunction(frame.Function) {
+			continue
+		}
+		if i != 0 {
+			buffer.WriteByte('\n')
+		}
+		i++
+		buffer.WriteString(frame.Function)
+		buffer.WriteByte('\n')
+		buffer.WriteByte('\t')
+		buffer.WriteString(frame.File)
+		buffer.WriteByte(':')
+		buffer.WriteString(strconv.Itoa(int(frame.Line)))
+	}
+
+	return buffer.String()
+}
+
+func shouldIgnoreStacktraceFunction(function string) bool {
+	for _, prefix := range _stacktraceIgnorePrefixes {
+		if strings.HasPrefix(function, prefix) {
+			return true
+		}
+	}
+	return false
+}
+
+type programCounters struct {
+	pcs []uintptr
+}
+
+func newProgramCounters(size int) *programCounters {
+	return &programCounters{make([]uintptr, size)}
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go
new file mode 100644
index 00000000000..641f20ccbcc
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go
@@ -0,0 +1,110 @@
+package hclog
+
+import (
+	"bytes"
+	"log"
+	"regexp"
+	"strings"
+)
+
+// Regex to ignore characters commonly found in timestamp formats from the
+// beginning of inputs.
+var logTimestampRegexp = regexp.MustCompile(`^[\d\s\:\/\.\+-TZ]*`)
+
+// Provides an io.Writer to shim the data out of *log.Logger
+// and back into our Logger. This is basically the only way to
+// build upon *log.Logger.
+type stdlogAdapter struct {
+	log                      Logger
+	inferLevels              bool
+	inferLevelsWithTimestamp bool
+	forceLevel               Level
+}
+
+// Take the data, infer the levels if configured, and send it through
+// a regular Logger.
+func (s *stdlogAdapter) Write(data []byte) (int, error) {
+	str := string(bytes.TrimRight(data, " \t\n"))
+
+	if s.forceLevel != NoLevel {
+		// Use pickLevel to strip log levels included in the line since we are
+		// forcing the level
+		_, str := s.pickLevel(str)
+
+		// Log at the forced level
+		s.dispatch(str, s.forceLevel)
+	} else if s.inferLevels {
+		if s.inferLevelsWithTimestamp {
+			str = s.trimTimestamp(str)
+		}
+
+		level, str := s.pickLevel(str)
+		s.dispatch(str, level)
+	} else {
+		s.log.Info(str)
+	}
+
+	return len(data), nil
+}
+
+func (s *stdlogAdapter) dispatch(str string, level Level) {
+	switch level {
+	case Trace:
+		s.log.Trace(str)
+	case Debug:
+		s.log.Debug(str)
+	case Info:
+		s.log.Info(str)
+	case Warn:
+		s.log.Warn(str)
+	case Error:
+		s.log.Error(str)
+	default:
+		s.log.Info(str)
+	}
+}
+
+// Detect, based on conventions, what log level this is.
+func (s *stdlogAdapter) pickLevel(str string) (Level, string) { + switch { + case strings.HasPrefix(str, "[DEBUG]"): + return Debug, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[TRACE]"): + return Trace, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[INFO]"): + return Info, strings.TrimSpace(str[6:]) + case strings.HasPrefix(str, "[WARN]"): + return Warn, strings.TrimSpace(str[6:]) + case strings.HasPrefix(str, "[ERROR]"): + return Error, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[ERR]"): + return Error, strings.TrimSpace(str[5:]) + default: + return Info, str + } +} + +func (s *stdlogAdapter) trimTimestamp(str string) string { + idx := logTimestampRegexp.FindStringIndex(str) + return str[idx[1]:] +} + +type logWriter struct { + l *log.Logger +} + +func (l *logWriter) Write(b []byte) (int, error) { + l.l.Println(string(bytes.TrimRight(b, " \n\t"))) + return len(b), nil +} + +// Takes a standard library logger and returns a Logger that will write to it +func FromStandardLogger(l *log.Logger, opts *LoggerOptions) Logger { + var dl LoggerOptions = *opts + + // Use the time format that log.Logger uses + dl.DisableTime = true + dl.Output = &logWriter{l} + + return New(&dl) +} diff --git a/vendor/github.com/hashicorp/go-hclog/writer.go b/vendor/github.com/hashicorp/go-hclog/writer.go new file mode 100644 index 00000000000..421a1f06c0b --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/writer.go @@ -0,0 +1,82 @@ +package hclog + +import ( + "bytes" + "io" +) + +type writer struct { + b bytes.Buffer + w io.Writer + color ColorOption +} + +func newWriter(w io.Writer, color ColorOption) *writer { + return &writer{w: w, color: color} +} + +func (w *writer) Flush(level Level) (err error) { + var unwritten = w.b.Bytes() + + if w.color != ColorOff { + color := _levelToColor[level] + unwritten = []byte(color.Sprintf("%s", unwritten)) + } + + if lw, ok := w.w.(LevelWriter); ok { + _, err = lw.LevelWrite(level, unwritten) + } else { + _, err = w.w.Write(unwritten) + } + w.b.Reset() + return err +} + +func (w *writer) Write(p []byte) (int, error) { + return w.b.Write(p) +} + +func (w *writer) WriteByte(c byte) error { + return w.b.WriteByte(c) +} + +func (w *writer) WriteString(s string) (int, error) { + return w.b.WriteString(s) +} + +// LevelWriter is the interface that wraps the LevelWrite method. +type LevelWriter interface { + LevelWrite(level Level, p []byte) (n int, err error) +} + +// LeveledWriter writes all log messages to the standard writer, +// except for log levels that are defined in the overrides map. +type LeveledWriter struct { + standard io.Writer + overrides map[Level]io.Writer +} + +// NewLeveledWriter returns an initialized LeveledWriter. +// +// standard will be used as the default writer for all log levels, +// except for log levels that are defined in the overrides map. +func NewLeveledWriter(standard io.Writer, overrides map[Level]io.Writer) *LeveledWriter { + return &LeveledWriter{ + standard: standard, + overrides: overrides, + } +} + +// Write implements io.Writer. +func (lw *LeveledWriter) Write(p []byte) (int, error) { + return lw.standard.Write(p) +} + +// LevelWrite implements LevelWriter. 
+func (lw *LeveledWriter) LevelWrite(level Level, p []byte) (int, error) { + w, ok := lw.overrides[level] + if !ok { + w = lw.standard + } + return w.Write(p) +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/.gitignore b/vendor/github.com/hashicorp/go-immutable-radix/.gitignore new file mode 100644 index 00000000000..daf913b1b34 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md new file mode 100644 index 00000000000..86c6d03fbaa --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md @@ -0,0 +1,23 @@ +# UNRELEASED + +# 1.3.0 (September 17th, 2020) + +FEATURES + +* Add reverse tree traversal [[GH-30](https://github.com/hashicorp/go-immutable-radix/pull/30)] + +# 1.2.0 (March 18th, 2020) + +FEATURES + +* Adds a `Clone` method to `Txn` allowing transactions to be split either into two independently mutable trees. [[GH-26](https://github.com/hashicorp/go-immutable-radix/pull/26)] + +# 1.1.0 (May 22nd, 2019) + +FEATURES + +* Add `SeekLowerBound` to allow for range scans. [[GH-24](https://github.com/hashicorp/go-immutable-radix/pull/24)] + +# 1.0.0 (August 30th, 2018) + +* go mod adopted diff --git a/vendor/github.com/hashicorp/go-immutable-radix/LICENSE b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE new file mode 100644 index 00000000000..e87a115e462 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. 
"Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. 
You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/README.md b/vendor/github.com/hashicorp/go-immutable-radix/README.md
new file mode 100644
index 00000000000..aca15a64212
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/README.md
@@ -0,0 +1,66 @@
+go-immutable-radix [![CircleCI](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master)
+=========
+
+Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
+The package only provides a single `Tree` implementation, optimized for sparse nodes.
+
+As a radix tree, it provides the following:
+ * O(k) operations. In many cases, this can be faster than a hash table since
+   the hash function is an O(k) operation, and hash tables have very poor cache locality.
+ * Minimum / Maximum value lookups
+ * Ordered iteration
+
+A tree supports using a transaction to batch multiple updates (insert, delete)
+in a more efficient manner than performing each operation one at a time.
+
+For a mutable variant, see [go-radix](https://github.com/armon/go-radix).
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix).
+
+Example
+=======
+
+Below is a simple example of usage:
+
+```go
+// Create a tree
+r := iradix.New()
+r, _, _ = r.Insert([]byte("foo"), 1)
+r, _, _ = r.Insert([]byte("bar"), 2)
+r, _, _ = r.Insert([]byte("foobar"), 2)
+
+// Find the longest prefix match
+m, _, _ := r.Root().LongestPrefix([]byte("foozip"))
+if string(m) != "foo" {
+	panic("should be foo")
+}
+```
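A minimal sketch of batching updates in a transaction (a sketch, not part of the
upstream README; it builds on the tree `r` above, and the `Txn` API it uses is
defined in `iradix.go` later in this patch):

```go
// Batch several updates; a single new root is created at Commit,
// so no intermediate trees are materialized.
txn := r.Txn()
txn.Insert([]byte("baz"), 3)
txn.Insert([]byte("qux"), 4)
txn.Delete([]byte("bar"))
r = txn.Commit()
```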
+
+Here is an example of performing a range scan of the keys.
+
+```go
+// Create a tree
+r := iradix.New()
+r, _, _ = r.Insert([]byte("001"), 1)
+r, _, _ = r.Insert([]byte("002"), 2)
+r, _, _ = r.Insert([]byte("005"), 5)
+r, _, _ = r.Insert([]byte("010"), 10)
+r, _, _ = r.Insert([]byte("100"), 10)
+
+// Range scan over the keys that sort lexicographically between [003, 050)
+it := r.Root().Iterator()
+it.SeekLowerBound([]byte("003"))
+for key, _, ok := it.Next(); ok; key, _, ok = it.Next() {
+	if string(key) >= "050" {
+		break
+	}
+	fmt.Println(string(key))
+}
+// Output:
+//  005
+//  010
+```
+
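The transaction machinery also supports change notification, which the README
does not demonstrate: with `TrackMutate` enabled, `GetWatch` returns a channel
that is closed once a committed transaction modifies the watched key. A minimal
sketch of that flow, assuming the key already exists (see `Txn.TrackMutate`,
`Txn.GetWatch`, and `Txn.Notify` in `iradix.go` below):

```go
tree, _, _ := iradix.New().Insert([]byte("foo"), 1)

txn := tree.Txn()
txn.TrackMutate(true) // track mutated nodes so Commit can notify watchers
watch, _, _ := txn.GetWatch([]byte("foo"))

txn.Insert([]byte("foo"), 2)
tree = txn.Commit() // Commit closes the watch channels of mutated nodes

<-watch // returns: the watched leaf was replaced by the commit
```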
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/edges.go b/vendor/github.com/hashicorp/go-immutable-radix/edges.go
new file mode 100644
index 00000000000..a63674775f2
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/edges.go
@@ -0,0 +1,21 @@
+package iradix
+
+import "sort"
+
+type edges []edge
+
+func (e edges) Len() int {
+	return len(e)
+}
+
+func (e edges) Less(i, j int) bool {
+	return e[i].label < e[j].label
+}
+
+func (e edges) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e edges) Sort() {
+	sort.Sort(e)
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
new file mode 100644
index 00000000000..168bda76dfb
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
@@ -0,0 +1,676 @@
+package iradix
+
+import (
+	"bytes"
+	"strings"
+
+	"github.com/hashicorp/golang-lru/simplelru"
+)
+
+const (
+	// defaultModifiedCache is the default size of the modified node
+	// cache used per transaction. This is used to cache the updates
+	// to the nodes near the root, while the leaves do not need to be
+	// cached. This is important for very large transactions to prevent
+	// the modified cache from growing to be enormous. This is also used
+	// to set the max size of the mutation notify maps since those should
+	// also be bounded in a similar way.
+	defaultModifiedCache = 8192
+)
+
+// Tree implements an immutable radix tree. This can be treated as a
+// Dictionary abstract data type. The main advantage over a standard
+// hash map is prefix-based lookups and ordered iteration. The immutability
+// means that it is safe to concurrently read from a Tree without any
+// coordination.
+type Tree struct {
+	root *Node
+	size int
+}
+
+// New returns an empty Tree
+func New() *Tree {
+	t := &Tree{
+		root: &Node{
+			mutateCh: make(chan struct{}),
+		},
+	}
+	return t
+}
+
+// Len is used to return the number of elements in the tree
+func (t *Tree) Len() int {
+	return t.size
+}
+
+// Txn is a transaction on the tree. This transaction is applied
+// atomically and returns a new tree when committed. A transaction
+// is not thread safe, and should only be used by a single goroutine.
+type Txn struct {
+	// root is the modified root for the transaction.
+	root *Node
+
+	// snap is a snapshot of the root node for use if we have to run the
+	// slow notify algorithm.
+	snap *Node
+
+	// size tracks the size of the tree as it is modified during the
+	// transaction.
+	size int
+
+	// writable is a cache of writable nodes that have been created during
+	// the course of the transaction. This allows us to re-use the same
+	// nodes for further writes and avoid unnecessary copies of nodes that
+	// have never been exposed outside the transaction. This will only hold
+	// up to defaultModifiedCache number of entries.
+	writable *simplelru.LRU
+
+	// trackChannels is used to hold channels that need to be notified to
+	// signal mutation of the tree. This will only hold up to
+	// defaultModifiedCache number of entries, after which we will set the
+	// trackOverflow flag, which will cause us to use a more expensive
+	// algorithm to perform the notifications. Mutation tracking is only
+	// performed if trackMutate is true.
+	trackChannels map[chan struct{}]struct{}
+	trackOverflow bool
+	trackMutate   bool
+}
+
+// Txn starts a new transaction that can be used to mutate the tree
+func (t *Tree) Txn() *Txn {
+	txn := &Txn{
+		root: t.root,
+		snap: t.root,
+		size: t.size,
+	}
+	return txn
+}
+
+// Clone makes an independent copy of the transaction. The new transaction
+// does not track any nodes and has TrackMutate turned off. The cloned
+// transaction will contain any uncommitted writes in the original
+// transaction, but further mutations to either will be independent and
+// result in different radix trees on Commit. A cloned transaction may be
+// passed to another goroutine and mutated there independently; however,
+// each transaction may only be mutated in a single thread.
+func (t *Txn) Clone() *Txn {
+	// reset the writable node cache to avoid leaking future writes into the clone
+	t.writable = nil
+
+	txn := &Txn{
+		root: t.root,
+		snap: t.snap,
+		size: t.size,
+	}
+	return txn
+}
+
+// TrackMutate can be used to toggle if mutations are tracked. If this is enabled
+// then notifications will be issued for affected internal nodes and leaves when
+// the transaction is committed.
+func (t *Txn) TrackMutate(track bool) {
+	t.trackMutate = track
+}
+
+// trackChannel safely attempts to track the given mutation channel, setting the
+// overflow flag if we can no longer track any more. This limits the amount of
+// state that will accumulate during a transaction, and we have a slower
+// algorithm to switch to if we overflow.
+func (t *Txn) trackChannel(ch chan struct{}) {
+	// In overflow, make sure we don't store any more objects.
+	if t.trackOverflow {
+		return
+	}
+
+	// If this would overflow the state we reject it and set the flag (since
+	// we aren't tracking everything that's required any longer).
+	if len(t.trackChannels) >= defaultModifiedCache {
+		// Mark that we are in the overflow state
+		t.trackOverflow = true
+
+		// Clear the map so that the channels can be garbage collected. It is
+		// safe to do this since we have already overflowed and will be using
+		// the slow notify algorithm.
+		t.trackChannels = nil
+		return
+	}
+
+	// Create the map on the fly when we need it.
+	if t.trackChannels == nil {
+		t.trackChannels = make(map[chan struct{}]struct{})
+	}
+
+	// Otherwise we are good to track it.
+	t.trackChannels[ch] = struct{}{}
+}
+
+// writeNode returns a node to be modified; if the current node has already been
+// modified during the course of the transaction, it is used in place. Set
+// forLeafUpdate to true if you are getting a write node to update the leaf,
+// which will set leaf mutation tracking appropriately as well.
+func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node {
+	// Ensure the writable set exists.
+	if t.writable == nil {
+		lru, err := simplelru.NewLRU(defaultModifiedCache, nil)
+		if err != nil {
+			panic(err)
+		}
+		t.writable = lru
+	}
+
+	// If this node has already been modified, we can continue to use it
+	// during this transaction.
We know that we don't need to track it for + // a node update since the node is writable, but if this is for a leaf + // update we track it, in case the initial write to this node didn't + // update the leaf. + if _, ok := t.writable.Get(n); ok { + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + return n + } + + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Copy the existing node. If you have set forLeafUpdate it will be + // safe to replace this leaf with another after you get your node for + // writing. You MUST replace it, because the channel associated with + // this leaf will be closed when this transaction is committed. + nc := &Node{ + mutateCh: make(chan struct{}), + leaf: n.leaf, + } + if n.prefix != nil { + nc.prefix = make([]byte, len(n.prefix)) + copy(nc.prefix, n.prefix) + } + if len(n.edges) != 0 { + nc.edges = make([]edge, len(n.edges)) + copy(nc.edges, n.edges) + } + + // Mark this node as writable. + t.writable.Add(nc, nil) + return nc +} + +// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction +// Returns the size of the subtree visited +func (t *Txn) trackChannelsAndCount(n *Node) int { + // Count only leaf nodes + leaves := 0 + if n.leaf != nil { + leaves = 1 + } + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Recurse on the children + for _, e := range n.edges { + leaves += t.trackChannelsAndCount(e.node) + } + return leaves +} + +// mergeChild is called to collapse the given node with its child. This is only +// called when the given node is not a leaf and has a single edge. +func (t *Txn) mergeChild(n *Node) { + // Mark the child node as being mutated since we are about to abandon + // it. We don't need to mark the leaf since we are retaining it if it + // is there. + e := n.edges[0] + child := e.node + if t.trackMutate { + t.trackChannel(child.mutateCh) + } + + // Merge the nodes. 
+ n.prefix = concat(n.prefix, child.prefix) + n.leaf = child.leaf + if len(child.edges) != 0 { + n.edges = make([]edge, len(child.edges)) + copy(n.edges, child.edges) + } else { + n.edges = nil + } +} + +// insert does a recursive insertion +func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) { + // Handle key exhaustion + if len(search) == 0 { + var oldVal interface{} + didUpdate := false + if n.isLeaf() { + oldVal = n.leaf.val + didUpdate = true + } + + nc := t.writeNode(n, true) + nc.leaf = &leafNode{ + mutateCh: make(chan struct{}), + key: k, + val: v, + } + return nc, oldVal, didUpdate + } + + // Look for the edge + idx, child := n.getEdge(search[0]) + + // No edge, create one + if child == nil { + e := edge{ + label: search[0], + node: &Node{ + mutateCh: make(chan struct{}), + leaf: &leafNode{ + mutateCh: make(chan struct{}), + key: k, + val: v, + }, + prefix: search, + }, + } + nc := t.writeNode(n, false) + nc.addEdge(e) + return nc, nil, false + } + + // Determine longest prefix of the search key on match + commonPrefix := longestPrefix(search, child.prefix) + if commonPrefix == len(child.prefix) { + search = search[commonPrefix:] + newChild, oldVal, didUpdate := t.insert(child, k, search, v) + if newChild != nil { + nc := t.writeNode(n, false) + nc.edges[idx].node = newChild + return nc, oldVal, didUpdate + } + return nil, oldVal, didUpdate + } + + // Split the node + nc := t.writeNode(n, false) + splitNode := &Node{ + mutateCh: make(chan struct{}), + prefix: search[:commonPrefix], + } + nc.replaceEdge(edge{ + label: search[0], + node: splitNode, + }) + + // Restore the existing child node + modChild := t.writeNode(child, false) + splitNode.addEdge(edge{ + label: modChild.prefix[commonPrefix], + node: modChild, + }) + modChild.prefix = modChild.prefix[commonPrefix:] + + // Create a new leaf node + leaf := &leafNode{ + mutateCh: make(chan struct{}), + key: k, + val: v, + } + + // If the new key is a subset, add to to this node + search = search[commonPrefix:] + if len(search) == 0 { + splitNode.leaf = leaf + return nc, nil, false + } + + // Create a new edge for the node + splitNode.addEdge(edge{ + label: search[0], + node: &Node{ + mutateCh: make(chan struct{}), + leaf: leaf, + prefix: search, + }, + }) + return nc, nil, false +} + +// delete does a recursive deletion +func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) { + // Check for key exhaustion + if len(search) == 0 { + if !n.isLeaf() { + return nil, nil + } + // Copy the pointer in case we are in a transaction that already + // modified this node since the node will be reused. Any changes + // made to the node will not affect returning the original leaf + // value. + oldLeaf := n.leaf + + // Remove the leaf node + nc := t.writeNode(n, true) + nc.leaf = nil + + // Check if this node should be merged + if n != t.root && len(nc.edges) == 1 { + t.mergeChild(nc) + } + return nc, oldLeaf + } + + // Look for an edge + label := search[0] + idx, child := n.getEdge(label) + if child == nil || !bytes.HasPrefix(search, child.prefix) { + return nil, nil + } + + // Consume the search prefix + search = search[len(child.prefix):] + newChild, leaf := t.delete(n, child, search) + if newChild == nil { + return nil, nil + } + + // Copy this node. WATCH OUT - it's safe to pass "false" here because we + // will only ADD a leaf via nc.mergeChild() if there isn't one due to + // the !nc.isLeaf() check in the logic just below. 
This is pretty subtle, + // so be careful if you change any of the logic here. + nc := t.writeNode(n, false) + + // Delete the edge if the node has no edges + if newChild.leaf == nil && len(newChild.edges) == 0 { + nc.delEdge(label) + if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() { + t.mergeChild(nc) + } + } else { + nc.edges[idx].node = newChild + } + return nc, leaf +} + +// delete does a recursive deletion +func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) { + // Check for key exhaustion + if len(search) == 0 { + nc := t.writeNode(n, true) + if n.isLeaf() { + nc.leaf = nil + } + nc.edges = nil + return nc, t.trackChannelsAndCount(n) + } + + // Look for an edge + label := search[0] + idx, child := n.getEdge(label) + // We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix + // Need to do both so that we can delete prefixes that don't correspond to any node in the tree + if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) { + return nil, 0 + } + + // Consume the search prefix + if len(child.prefix) > len(search) { + search = []byte("") + } else { + search = search[len(child.prefix):] + } + newChild, numDeletions := t.deletePrefix(n, child, search) + if newChild == nil { + return nil, 0 + } + // Copy this node. WATCH OUT - it's safe to pass "false" here because we + // will only ADD a leaf via nc.mergeChild() if there isn't one due to + // the !nc.isLeaf() check in the logic just below. This is pretty subtle, + // so be careful if you change any of the logic here. + + nc := t.writeNode(n, false) + + // Delete the edge if the node has no edges + if newChild.leaf == nil && len(newChild.edges) == 0 { + nc.delEdge(label) + if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() { + t.mergeChild(nc) + } + } else { + nc.edges[idx].node = newChild + } + return nc, numDeletions +} + +// Insert is used to add or update a given key. The return provides +// the previous value and a bool indicating if any was set. +func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) { + newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v) + if newRoot != nil { + t.root = newRoot + } + if !didUpdate { + t.size++ + } + return oldVal, didUpdate +} + +// Delete is used to delete a given key. Returns the old value if any, +// and a bool indicating if the key was set. +func (t *Txn) Delete(k []byte) (interface{}, bool) { + newRoot, leaf := t.delete(nil, t.root, k) + if newRoot != nil { + t.root = newRoot + } + if leaf != nil { + t.size-- + return leaf.val, true + } + return nil, false +} + +// DeletePrefix is used to delete an entire subtree that matches the prefix +// This will delete all nodes under that prefix +func (t *Txn) DeletePrefix(prefix []byte) bool { + newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix) + if newRoot != nil { + t.root = newRoot + t.size = t.size - numDeletions + return true + } + return false + +} + +// Root returns the current root of the radix tree within this +// transaction. The root is not safe across insert and delete operations, +// but can be used to read the current state during a transaction. 
+func (t *Txn) Root() *Node { + return t.root +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Txn) Get(k []byte) (interface{}, bool) { + return t.root.Get(k) +} + +// GetWatch is used to lookup a specific key, returning +// the watch channel, value and if it was found +func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + return t.root.GetWatch(k) +} + +// Commit is used to finalize the transaction and return a new tree. If mutation +// tracking is turned on then notifications will also be issued. +func (t *Txn) Commit() *Tree { + nt := t.CommitOnly() + if t.trackMutate { + t.Notify() + } + return nt +} + +// CommitOnly is used to finalize the transaction and return a new tree, but +// does not issue any notifications until Notify is called. +func (t *Txn) CommitOnly() *Tree { + nt := &Tree{t.root, t.size} + t.writable = nil + return nt +} + +// slowNotify does a complete comparison of the before and after trees in order +// to trigger notifications. This doesn't require any additional state but it +// is very expensive to compute. +func (t *Txn) slowNotify() { + snapIter := t.snap.rawIterator() + rootIter := t.root.rawIterator() + for snapIter.Front() != nil || rootIter.Front() != nil { + // If we've exhausted the nodes in the old snapshot, we know + // there's nothing remaining to notify. + if snapIter.Front() == nil { + return + } + snapElem := snapIter.Front() + + // If we've exhausted the nodes in the new root, we know we need + // to invalidate everything that remains in the old snapshot. We + // know from the loop condition there's something in the old + // snapshot. + if rootIter.Front() == nil { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // Do one string compare so we can check the various conditions + // below without repeating the compare. + cmp := strings.Compare(snapIter.Path(), rootIter.Path()) + + // If the snapshot is behind the root, then we must have deleted + // this node during the transaction. + if cmp < 0 { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // If the snapshot is ahead of the root, then we must have added + // this node during the transaction. + if cmp > 0 { + rootIter.Next() + continue + } + + // If we have the same path, then we need to see if we mutated a + // node and possibly the leaf. + rootElem := rootIter.Front() + if snapElem != rootElem { + close(snapElem.mutateCh) + if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) { + close(snapElem.leaf.mutateCh) + } + } + snapIter.Next() + rootIter.Next() + } +} + +// Notify is used along with TrackMutate to trigger notifications. This must +// only be done once a transaction is committed via CommitOnly, and it is called +// automatically by Commit. +func (t *Txn) Notify() { + if !t.trackMutate { + return + } + + // If we've overflowed the tracking state we can't use it in any way and + // need to do a full tree compare. + if t.trackOverflow { + t.slowNotify() + } else { + for ch := range t.trackChannels { + close(ch) + } + } + + // Clean up the tracking state so that a re-notify is safe (will trigger + // the else clause above which will be a no-op). + t.trackChannels = nil + t.trackOverflow = false +} + +// Insert is used to add or update a given key. The return provides +// the new tree, previous value and a bool indicating if any was set. 
+func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) { + txn := t.Txn() + old, ok := txn.Insert(k, v) + return txn.Commit(), old, ok +} + +// Delete is used to delete a given key. Returns the new tree, +// old value if any, and a bool indicating if the key was set. +func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) { + txn := t.Txn() + old, ok := txn.Delete(k) + return txn.Commit(), old, ok +} + +// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree, +// and a bool indicating if the prefix matched any nodes +func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) { + txn := t.Txn() + ok := txn.DeletePrefix(k) + return txn.Commit(), ok +} + +// Root returns the root node of the tree which can be used for richer +// query operations. +func (t *Tree) Root() *Node { + return t.root +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Tree) Get(k []byte) (interface{}, bool) { + return t.root.Get(k) +} + +// longestPrefix finds the length of the shared prefix +// of two strings +func longestPrefix(k1, k2 []byte) int { + max := len(k1) + if l := len(k2); l < max { + max = l + } + var i int + for i = 0; i < max; i++ { + if k1[i] != k2[i] { + break + } + } + return i +} + +// concat two byte slices, returning a third new copy +func concat(a, b []byte) []byte { + c := make([]byte, len(a)+len(b)) + copy(c, a) + copy(c[len(a):], b) + return c +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/vendor/github.com/hashicorp/go-immutable-radix/iter.go new file mode 100644 index 00000000000..f17d0a644f4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/iter.go @@ -0,0 +1,205 @@ +package iradix + +import ( + "bytes" +) + +// Iterator is used to iterate over a set of nodes +// in pre-order +type Iterator struct { + node *Node + stack []edges +} + +// SeekPrefixWatch is used to seek the iterator to a given prefix +// and returns the watch channel of the finest granularity +func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) { + // Wipe the stack + i.stack = nil + n := i.node + watch = n.mutateCh + search := prefix + for { + // Check for key exhaustion + if len(search) == 0 { + i.node = n + return + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + i.node = nil + return + } + + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + + } else if bytes.HasPrefix(n.prefix, search) { + i.node = n + return + } else { + i.node = nil + return + } + } +} + +// SeekPrefix is used to seek the iterator to a given prefix +func (i *Iterator) SeekPrefix(prefix []byte) { + i.SeekPrefixWatch(prefix) +} + +func (i *Iterator) recurseMin(n *Node) *Node { + // Traverse to the minimum child + if n.leaf != nil { + return n + } + nEdges := len(n.edges) + if nEdges > 1 { + // Add all the other edges to the stack (the min node will be added as + // we recurse) + i.stack = append(i.stack, n.edges[1:]) + } + if nEdges > 0 { + return i.recurseMin(n.edges[0].node) + } + // Shouldn't be possible + return nil +} + +// SeekLowerBound is used to seek the iterator to the smallest key that is +// greater or equal to the given key. There is no watch variant as it's hard to +// predict based on the radix structure which node(s) changes might affect the +// result. 
+func (i *Iterator) SeekLowerBound(key []byte) { + // Wipe the stack. Unlike Prefix iteration, we need to build the stack as we + // go because we need only a subset of edges of many nodes in the path to the + // leaf with the lower bound. Note that the iterator will still recurse into + // children that we don't traverse on the way to the reverse lower bound as it + // walks the stack. + i.stack = []edges{} + // i.node starts off in the common case as pointing to the root node of the + // tree. By the time we return we have either found a lower bound and setup + // the stack to traverse all larger keys, or we have not and the stack and + // node should both be nil to prevent the iterator from assuming it is just + // iterating the whole tree from the root node. Either way this needs to end + // up as nil so just set it here. + n := i.node + i.node = nil + search := key + + found := func(n *Node) { + i.stack = append(i.stack, edges{edge{node: n}}) + } + + findMin := func(n *Node) { + n = i.recurseMin(n) + if n != nil { + found(n) + return + } + } + + for { + // Compare current prefix with the search key's same-length prefix. + var prefixCmp int + if len(n.prefix) < len(search) { + prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)]) + } else { + prefixCmp = bytes.Compare(n.prefix, search) + } + + if prefixCmp > 0 { + // Prefix is larger, that means the lower bound is greater than the search + // and from now on we need to follow the minimum path to the smallest + // leaf under this subtree. + findMin(n) + return + } + + if prefixCmp < 0 { + // Prefix is smaller than search prefix, that means there is no lower + // bound + i.node = nil + return + } + + // Prefix is equal, we are still heading for an exact match. If this is a + // leaf and an exact match we're done. + if n.leaf != nil && bytes.Equal(n.leaf.key, key) { + found(n) + return + } + + // Consume the search prefix if the current node has one. Note that this is + // safe because if n.prefix is longer than the search slice prefixCmp would + // have been > 0 above and the method would have already returned. + search = search[len(n.prefix):] + + if len(search) == 0 { + // We've exhausted the search key, but the current node is not an exact + // match or not a leaf. That means that the leaf value if it exists, and + // all child nodes must be strictly greater, the smallest key in this + // subtree must be the lower bound. + findMin(n) + return + } + + // Otherwise, take the lower bound next edge. + idx, lbNode := n.getLowerBoundEdge(search[0]) + if lbNode == nil { + return + } + + // Create stack edges for the all strictly higher edges in this node. 
+ if idx+1 < len(n.edges) { + i.stack = append(i.stack, n.edges[idx+1:]) + } + + // Recurse + n = lbNode + } +} + +// Next returns the next node in order +func (i *Iterator) Next() ([]byte, interface{}, bool) { + // Initialize our stack if needed + if i.stack == nil && i.node != nil { + i.stack = []edges{ + { + edge{node: i.node}, + }, + } + } + + for len(i.stack) > 0 { + // Inspect the last element of the stack + n := len(i.stack) + last := i.stack[n-1] + elem := last[0].node + + // Update the stack + if len(last) > 1 { + i.stack[n-1] = last[1:] + } else { + i.stack = i.stack[:n-1] + } + + // Push the edges onto the frontier + if len(elem.edges) > 0 { + i.stack = append(i.stack, elem.edges) + } + + // Return the leaf values if any + if elem.leaf != nil { + return elem.leaf.key, elem.leaf.val, true + } + } + return nil, nil, false +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/node.go b/vendor/github.com/hashicorp/go-immutable-radix/node.go new file mode 100644 index 00000000000..35985480872 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/node.go @@ -0,0 +1,334 @@ +package iradix + +import ( + "bytes" + "sort" +) + +// WalkFn is used when walking the tree. Takes a +// key and value, returning if iteration should +// be terminated. +type WalkFn func(k []byte, v interface{}) bool + +// leafNode is used to represent a value +type leafNode struct { + mutateCh chan struct{} + key []byte + val interface{} +} + +// edge is used to represent an edge node +type edge struct { + label byte + node *Node +} + +// Node is an immutable node in the radix tree +type Node struct { + // mutateCh is closed if this node is modified + mutateCh chan struct{} + + // leaf is used to store possible leaf + leaf *leafNode + + // prefix is the common prefix we ignore + prefix []byte + + // Edges should be stored in-order for iteration. 
+ // We avoid a fully materialized slice to save memory, + // since in most cases we expect to be sparse + edges edges +} + +func (n *Node) isLeaf() bool { + return n.leaf != nil +} + +func (n *Node) addEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + n.edges = append(n.edges, e) + if idx != num { + copy(n.edges[idx+1:], n.edges[idx:num]) + n.edges[idx] = e + } +} + +func (n *Node) replaceEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + if idx < num && n.edges[idx].label == e.label { + n.edges[idx].node = e.node + return + } + panic("replacing missing edge") +} + +func (n *Node) getEdge(label byte) (int, *Node) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + return idx, n.edges[idx].node + } + return -1, nil +} + +func (n *Node) getLowerBoundEdge(label byte) (int, *Node) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + // we want lower bound behavior so return even if it's not an exact match + if idx < num { + return idx, n.edges[idx].node + } + return -1, nil +} + +func (n *Node) delEdge(label byte) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + copy(n.edges[idx:], n.edges[idx+1:]) + n.edges[len(n.edges)-1] = edge{} + n.edges = n.edges[:len(n.edges)-1] + } +} + +func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + search := k + watch := n.mutateCh + for { + // Check for key exhaustion + if len(search) == 0 { + if n.isLeaf() { + return n.leaf.mutateCh, n.leaf.val, true + } + break + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + break + } + + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return watch, nil, false +} + +func (n *Node) Get(k []byte) (interface{}, bool) { + _, val, ok := n.GetWatch(k) + return val, ok +} + +// LongestPrefix is like Get, but instead of an +// exact match, it will return the longest prefix match. 
+func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {
+	var last *leafNode
+	search := k
+	for {
+		// Look for a leaf node
+		if n.isLeaf() {
+			last = n.leaf
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			break
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	if last != nil {
+		return last.key, last.val, true
+	}
+	return nil, nil, false
+}
+
+// Minimum is used to return the minimum value in the tree
+func (n *Node) Minimum() ([]byte, interface{}, bool) {
+	for {
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		}
+		if len(n.edges) > 0 {
+			n = n.edges[0].node
+		} else {
+			break
+		}
+	}
+	return nil, nil, false
+}
+
+// Maximum is used to return the maximum value in the tree
+func (n *Node) Maximum() ([]byte, interface{}, bool) {
+	for {
+		if num := len(n.edges); num > 0 {
+			n = n.edges[num-1].node
+			continue
+		}
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		} else {
+			break
+		}
+	}
+	return nil, nil, false
+}
+
+// Iterator is used to return an iterator at
+// the given node to walk the tree
+func (n *Node) Iterator() *Iterator {
+	return &Iterator{node: n}
+}
+
+// ReverseIterator is used to return an iterator at
+// the given node to walk the tree backwards
+func (n *Node) ReverseIterator() *ReverseIterator {
+	return NewReverseIterator(n)
+}
+
+// rawIterator is used to return a raw iterator at the given node to walk the
+// tree.
+func (n *Node) rawIterator() *rawIterator {
+	iter := &rawIterator{node: n}
+	iter.Next()
+	return iter
+}
+
+// Walk is used to walk the tree
+func (n *Node) Walk(fn WalkFn) {
+	recursiveWalk(n, fn)
+}
+
+// WalkBackwards is used to walk the tree in reverse order
+func (n *Node) WalkBackwards(fn WalkFn) {
+	reverseRecursiveWalk(n, fn)
+}
+
+// WalkPrefix is used to walk the tree under a prefix
+func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
+	search := prefix
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			recursiveWalk(n, fn)
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+
+		} else if bytes.HasPrefix(n.prefix, search) {
+			// Child may be under our search prefix
+			recursiveWalk(n, fn)
+			return
+		} else {
+			break
+		}
+	}
+}
+
+// WalkPath is used to walk the tree, but only visiting nodes
+// from the root down to a given leaf. Where WalkPrefix walks
+// all the entries *under* the given prefix, this walks the
+// entries *above* the given prefix.
+func (n *Node) WalkPath(path []byte, fn WalkFn) {
+	search := path
+	for {
+		// Visit the leaf values if any
+		if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+			return
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			return
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+}
+
+// recursiveWalk is used to do a pre-order walk of a node
+// recursively.
Returns true if the walk should be aborted +func recursiveWalk(n *Node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children + for _, e := range n.edges { + if recursiveWalk(e.node, fn) { + return true + } + } + return false +} + +// reverseRecursiveWalk is used to do a reverse pre-order +// walk of a node recursively. Returns true if the walk +// should be aborted +func reverseRecursiveWalk(n *Node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children in reverse order + for i := len(n.edges) - 1; i >= 0; i-- { + e := n.edges[i] + if reverseRecursiveWalk(e.node, fn) { + return true + } + } + return false +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go new file mode 100644 index 00000000000..3c6a22525c8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go @@ -0,0 +1,78 @@ +package iradix + +// rawIterator visits each of the nodes in the tree, even the ones that are not +// leaves. It keeps track of the effective path (what a leaf at a given node +// would be called), which is useful for comparing trees. +type rawIterator struct { + // node is the starting node in the tree for the iterator. + node *Node + + // stack keeps track of edges in the frontier. + stack []rawStackEntry + + // pos is the current position of the iterator. + pos *Node + + // path is the effective path of the current iterator position, + // regardless of whether the current node is a leaf. + path string +} + +// rawStackEntry is used to keep track of the cumulative common path as well as +// its associated edges in the frontier. +type rawStackEntry struct { + path string + edges edges +} + +// Front returns the current node that has been iterated to. +func (i *rawIterator) Front() *Node { + return i.pos +} + +// Path returns the effective path of the current node, even if it's not actually +// a leaf. +func (i *rawIterator) Path() string { + return i.path +} + +// Next advances the iterator to the next node. +func (i *rawIterator) Next() { + // Initialize our stack if needed. + if i.stack == nil && i.node != nil { + i.stack = []rawStackEntry{ + { + edges: edges{ + edge{node: i.node}, + }, + }, + } + } + + for len(i.stack) > 0 { + // Inspect the last element of the stack. + n := len(i.stack) + last := i.stack[n-1] + elem := last.edges[0].node + + // Update the stack. + if len(last.edges) > 1 { + i.stack[n-1].edges = last.edges[1:] + } else { + i.stack = i.stack[:n-1] + } + + // Push the edges onto the frontier. 
+ if len(elem.edges) > 0 { + path := last.path + string(elem.prefix) + i.stack = append(i.stack, rawStackEntry{path, elem.edges}) + } + + i.pos = elem + i.path = last.path + string(elem.prefix) + return + } + + i.pos = nil + i.path = "" +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go new file mode 100644 index 00000000000..554fa7129c1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go @@ -0,0 +1,239 @@ +package iradix + +import ( + "bytes" +) + +// ReverseIterator is used to iterate over a set of nodes +// in reverse in-order +type ReverseIterator struct { + i *Iterator + + // expandedParents stores the set of parent nodes whose relevant children have + // already been pushed into the stack. This can happen during seek or during + // iteration. + // + // Unlike forward iteration we need to recurse into children before we can + // output the value stored in an internal leaf since all children are greater. + // We use this to track whether we have already ensured all the children are + // in the stack. + expandedParents map[*Node]struct{} +} + +// NewReverseIterator returns a new ReverseIterator at a node +func NewReverseIterator(n *Node) *ReverseIterator { + return &ReverseIterator{ + i: &Iterator{node: n}, + } +} + +// SeekPrefixWatch is used to seek the iterator to a given prefix +// and returns the watch channel of the finest granularity +func (ri *ReverseIterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) { + return ri.i.SeekPrefixWatch(prefix) +} + +// SeekPrefix is used to seek the iterator to a given prefix +func (ri *ReverseIterator) SeekPrefix(prefix []byte) { + ri.i.SeekPrefixWatch(prefix) +} + +// SeekReverseLowerBound is used to seek the iterator to the largest key that is +// lower or equal to the given key. There is no watch variant as it's hard to +// predict based on the radix structure which node(s) changes might affect the +// result. +func (ri *ReverseIterator) SeekReverseLowerBound(key []byte) { + // Wipe the stack. Unlike Prefix iteration, we need to build the stack as we + // go because we need only a subset of edges of many nodes in the path to the + // leaf with the lower bound. Note that the iterator will still recurse into + // children that we don't traverse on the way to the reverse lower bound as it + // walks the stack. + ri.i.stack = []edges{} + // ri.i.node starts off in the common case as pointing to the root node of the + // tree. By the time we return we have either found a lower bound and setup + // the stack to traverse all larger keys, or we have not and the stack and + // node should both be nil to prevent the iterator from assuming it is just + // iterating the whole tree from the root node. Either way this needs to end + // up as nil so just set it here. + n := ri.i.node + ri.i.node = nil + search := key + + if ri.expandedParents == nil { + ri.expandedParents = make(map[*Node]struct{}) + } + + found := func(n *Node) { + ri.i.stack = append(ri.i.stack, edges{edge{node: n}}) + // We need to mark this node as expanded in advance too otherwise the + // iterator will attempt to walk all of its children even though they are + // greater than the lower bound we have found. We've expanded it in the + // sense that all of its children that we want to walk are already in the + // stack (i.e. none of them). 
+ ri.expandedParents[n] = struct{}{} + } + + for { + // Compare current prefix with the search key's same-length prefix. + var prefixCmp int + if len(n.prefix) < len(search) { + prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)]) + } else { + prefixCmp = bytes.Compare(n.prefix, search) + } + + if prefixCmp < 0 { + // Prefix is smaller than search prefix, that means there is no exact + // match for the search key. But we are looking in reverse, so the reverse + // lower bound will be the largest leaf under this subtree, since it is + // the value that would come right before the current search key if it + // were in the tree. So we need to follow the maximum path in this subtree + // to find it. Note that this is exactly what the iterator will already do + // if it finds a node in the stack that has _not_ been marked as expanded + // so in this one case we don't call `found` and instead let the iterator + // do the expansion and recursion through all the children. + ri.i.stack = append(ri.i.stack, edges{edge{node: n}}) + return + } + + if prefixCmp > 0 { + // Prefix is larger than search prefix, or there is no prefix but we've + // also exhausted the search key. Either way, that means there is no + // reverse lower bound since nothing comes before our current search + // prefix. + return + } + + // If this is a leaf, something needs to happen! Note that if it's a leaf + // and prefixCmp was zero (which it must be to get here) then the leaf value + // is either an exact match for the search, or it's lower. It can't be + // greater. + if n.isLeaf() { + + // Firstly, if it's an exact match, we're done! + if bytes.Equal(n.leaf.key, key) { + found(n) + return + } + + // It's not so this node's leaf value must be lower and could still be a + // valid contender for reverse lower bound. + + // If it has no children then we are also done. + if len(n.edges) == 0 { + // This leaf is the lower bound. + found(n) + return + } + + // Finally, this leaf is internal (has children) so we'll keep searching, + // but we need to add it to the iterator's stack since it has a leaf value + // that needs to be iterated over. It needs to be added to the stack + // before its children below as it comes first. + ri.i.stack = append(ri.i.stack, edges{edge{node: n}}) + // We also need to mark it as expanded since we'll be adding any of its + // relevant children below and so don't want the iterator to re-add them + // on its way back up the stack. + ri.expandedParents[n] = struct{}{} + } + + // Consume the search prefix. Note that this is safe because if n.prefix is + // longer than the search slice prefixCmp would have been > 0 above and the + // method would have already returned. + search = search[len(n.prefix):] + + if len(search) == 0 { + // We've exhausted the search key but we are not at a leaf. That means all + // children are greater than the search key so a reverse lower bound + // doesn't exist in this subtree. Note that there might still be one in + // the whole radix tree by following a different path somewhere further + // up. If that's the case then the iterator's stack will contain all the + // smaller nodes already and Previous will walk through them correctly. + return + } + + // Otherwise, take the lower bound next edge. + idx, lbNode := n.getLowerBoundEdge(search[0]) + + // From here, we need to update the stack with all values lower than + // the lower bound edge. 
Since getLowerBoundEdge() returns -1 when the
+		// search prefix is larger than all edges, we need to place idx at the
+		// last edge index so they can all be placed in the stack, since they
+		// come before our search prefix.
+		if idx == -1 {
+			idx = len(n.edges)
+		}
+
+		// Create stack edges for all the strictly lower edges in this node.
+		if len(n.edges[:idx]) > 0 {
+			ri.i.stack = append(ri.i.stack, n.edges[:idx])
+		}
+
+		// Exit if there's no lower bound edge. The stack will have the previous
+		// nodes already.
+		if lbNode == nil {
+			return
+		}
+
+		// Recurse
+		n = lbNode
+	}
+}
+
+// Previous returns the previous node in reverse order
+func (ri *ReverseIterator) Previous() ([]byte, interface{}, bool) {
+	// Initialize our stack if needed
+	if ri.i.stack == nil && ri.i.node != nil {
+		ri.i.stack = []edges{
+			{
+				edge{node: ri.i.node},
+			},
+		}
+	}
+
+	if ri.expandedParents == nil {
+		ri.expandedParents = make(map[*Node]struct{})
+	}
+
+	for len(ri.i.stack) > 0 {
+		// Inspect the last element of the stack
+		n := len(ri.i.stack)
+		last := ri.i.stack[n-1]
+		m := len(last)
+		elem := last[m-1].node
+
+		_, alreadyExpanded := ri.expandedParents[elem]
+
+		// If this is an internal node and we've not seen it already, we need to
+		// leave it in the stack so we can return its possible leaf value _after_
+		// we've recursed through all its children.
+		if len(elem.edges) > 0 && !alreadyExpanded {
+			// record that we've seen this node!
+			ri.expandedParents[elem] = struct{}{}
+			// push child edges onto stack and skip the rest of the loop to recurse
+			// into the largest one.
+			ri.i.stack = append(ri.i.stack, elem.edges)
+			continue
+		}
+
+		// Remove the node from the stack
+		if m > 1 {
+			ri.i.stack[n-1] = last[:m-1]
+		} else {
+			ri.i.stack = ri.i.stack[:n-1]
+		}
+		// We don't need this state any more as it's no longer in the stack so we
+		// won't visit it again
+		if alreadyExpanded {
+			delete(ri.expandedParents, elem)
+		}
+
+		// If this is a leaf, return it
+		if elem.leaf != nil {
+			return elem.leaf.key, elem.leaf.val, true
+		}
+
+		// it's not a leaf so keep walking the stack to find the previous leaf
+	}
+	return nil, nil, false
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/.gitignore b/vendor/github.com/hashicorp/go-plugin/.gitignore
new file mode 100644
index 00000000000..4befed30a1c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/.gitignore
@@ -0,0 +1,2 @@
+.DS_Store
+.idea
diff --git a/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md b/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md
new file mode 100644
index 00000000000..8341962886b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md
@@ -0,0 +1,25 @@
+## v1.4.6
+
+BUG FIXES:
+
+* server: Prevent gRPC broker goroutine leak when using `GRPCServer` type `GracefulStop()` or `Stop()` methods [[GH-220](https://github.com/hashicorp/go-plugin/pull/220)]
+
+## v1.4.5
+
+ENHANCEMENTS:
+
+* client: log warning when SecureConfig is nil [[GH-207](https://github.com/hashicorp/go-plugin/pull/207)]
+
+
+## v1.4.4
+
+ENHANCEMENTS:
+
+* client: increase level of plugin exit logs [[GH-195](https://github.com/hashicorp/go-plugin/pull/195)]
+
+BUG FIXES:
+
+* Bidirectional communication: fix bidirectional communication when AutoMTLS is enabled [[GH-193](https://github.com/hashicorp/go-plugin/pull/193)]
+* RPC: Trim a spurious log message for plugins using RPC [[GH-186](https://github.com/hashicorp/go-plugin/pull/186)]
+
+
diff --git
a/vendor/github.com/hashicorp/go-plugin/LICENSE b/vendor/github.com/hashicorp/go-plugin/LICENSE new file mode 100644 index 00000000000..042324fb7e1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/LICENSE @@ -0,0 +1,355 @@ +Copyright (c) 2016 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. 
under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. 
+ Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. 
Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md new file mode 100644 index 00000000000..39391f24fe4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/README.md @@ -0,0 +1,164 @@ +# Go Plugin System over RPC + +`go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system +that has been in use by HashiCorp tooling for over 4 years. While initially +created for [Packer](https://www.packer.io), it is additionally in use by +[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), +[Vault](https://www.vaultproject.io), and +[Boundary](https://www.boundaryproject.io). + +While the plugin system is over RPC, it is currently only designed to work +over a local [reliable] network. Plugins over a real network are not supported +and will lead to unexpected behavior. + +This plugin system has been used on millions of machines across many different +projects and has proven to be battle hardened and ready for production use. + +## Features + +The HashiCorp plugin system supports a number of features: + +**Plugins are Go interface implementations.** This makes writing and consuming +plugins feel very natural. To a plugin author: you just implement an +interface as if it were going to run in the same process. 
For a plugin user: +you just use and call functions on an interface as if it were in the same +process. This plugin system handles the communication in between. + +**Cross-language support.** Plugins can be written (and consumed) by +almost every major language. This library supports serving plugins via +[gRPC](http://www.grpc.io). gRPC-based plugins enable plugins to be written +in any language. + +**Complex arguments and return values are supported.** This library +provides APIs for handling complex arguments and return values such +as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library +(`MuxBroker`) for creating new connections between the client/server to +serve additional interfaces or transfer raw data. + +**Bidirectional communication.** Because the plugin system supports +complex arguments, the host process can send it interface implementations +and the plugin can call back into the host process. + +**Built-in Logging.** Any plugins that use the `log` standard library +will have log data automatically sent to the host process. The host +process will mirror this output prefixed with the path to the plugin +binary. This makes debugging with plugins simple. If the host system +uses [hclog](https://github.com/hashicorp/go-hclog) then the log data +will be structured. If the plugin also uses hclog, logs from the plugin +will be sent to the host hclog and be structured. + +**Protocol Versioning.** A very basic "protocol version" is supported that +can be incremented to invalidate any previous plugins. This is useful when +interface signatures are changing, protocol level changes are necessary, +etc. When a protocol version is incompatible, a human friendly error +message is shown to the end user. + +**Stdout/Stderr Syncing.** While plugins are subprocesses, they can continue +to use stdout/stderr as usual and the output will get mirrored back to +the host process. The host process can control what `io.Writer` these +streams go to to prevent this from happening. + +**TTY Preservation.** Plugin subprocesses are connected to the identical +stdin file descriptor as the host process, allowing software that requires +a TTY to work. For example, a plugin can execute `ssh` and even though there +are multiple subprocesses and RPC happening, it will look and act perfectly +to the end user. + +**Host upgrade while a plugin is running.** Plugins can be "reattached" +so that the host process can be upgraded while the plugin is still running. +This requires the host/plugin to know this is possible and daemonize +properly. `NewClient` takes a `ReattachConfig` to determine if and how to +reattach. + +**Cryptographically Secure Plugins.** Plugins can be verified with an expected +checksum and RPC communications can be configured to use TLS. The host process +must be properly secured to protect this configuration. + +## Architecture + +The HashiCorp plugin system works by launching subprocesses and communicating +over RPC (using standard `net/rpc` or [gRPC](http://www.grpc.io)). A single +connection is made between any plugin and the host process. For net/rpc-based +plugins, we use a [connection multiplexing](https://github.com/hashicorp/yamux) +library to multiplex any other connections on top. For gRPC-based plugins, +the HTTP2 protocol handles multiplexing. + +This architecture has a number of benefits: + + * Plugins can't crash your host process: A panic in a plugin doesn't + panic the plugin user. 
+
+  * Plugins are very easy to write: just write a Go application and `go build`.
+    Or use any other language to write a gRPC server with a tiny amount of
+    boilerplate to support go-plugin.
+
+  * Plugins are very easy to install: just put the binary in a location where
+    the host will find it (depends on the host but this library also provides
+    helpers), and the plugin host handles the rest.
+
+  * Plugins can be relatively secure: The plugin only has access to the
+    interfaces and args given to it, not to the entire memory space of the
+    process. Additionally, go-plugin can communicate with the plugin over
+    TLS.
+
+## Usage
+
+To use the plugin system, you must take the following steps. These are
+high-level steps that must be done. Examples are available in the
+`examples/` directory (see also the minimal sketch following this README).
+
+  1. Choose the interface(s) you want to expose for plugins.
+
+  2. For each interface, implement an implementation of that interface
+     that communicates over a `net/rpc` connection or over a
+     [gRPC](http://www.grpc.io) connection or both. You'll have to implement
+     both a client and server implementation.
+
+  3. Create a `Plugin` implementation that knows how to create the RPC
+     client/server for a given plugin type.
+
+  4. Plugin authors call `plugin.Serve` to serve a plugin from the
+     `main` function.
+
+  5. Plugin users use `plugin.Client` to launch a subprocess and request
+     an interface implementation over RPC.
+
+That's it! In practice, step 2 is the most tedious and time-consuming step.
+Even so, it isn't very difficult and you can see examples in the `examples/`
+directory as well as throughout our various open source projects.
+
+For complete API documentation, see [GoDoc](https://godoc.org/github.com/hashicorp/go-plugin).
+
+## Roadmap
+
+Our plugin system is constantly evolving. As we use the plugin system for
+new projects or for new features in existing projects, we constantly find
+improvements we can make.
+
+At this point in time, the roadmap for the plugin system is:
+
+**Semantic Versioning.** Plugins will be able to implement a semantic version.
+This plugin system will give host processes a system for constraining
+versions. This is in addition to the protocol versioning already present
+which is more for larger underlying changes.
+
+## What About Shared Libraries?
+
+When we started using plugins (late 2012, early 2013), plugins over RPC
+were the only option since Go didn't support dynamic library loading. Today,
+Go supports the [plugin](https://golang.org/pkg/plugin/) standard library with
+a number of limitations. Since 2012, our plugin system has been stabilized
+by tens of millions of users, and has many benefits we've come to
+value greatly.
+
+For example, we use this plugin system in
+[Vault](https://www.vaultproject.io) where dynamic library loading is
+not acceptable for security reasons. That is an extreme
+example, but we believe our library system has more upsides than downsides
+over dynamic library loading and since we've had it built and tested for years,
+we'll continue to use it.
+
+Shared libraries have one major advantage over our system which is much
+higher performance. In real world scenarios across our various tools,
+we've never required any more performance out of our plugin system and it
+has seen very high throughput, so this isn't a concern for us at the moment.
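To make the five usage steps above concrete, here is a minimal host-side sketch over `net/rpc`, modeled on the `examples/basic` layout this README points to. It is illustrative only and not part of the vendored code: the `Greeter` interface, the magic-cookie values, and the `./greeter-plugin` binary path are all assumptions, and the plugin binary itself would perform step 4 by calling `plugin.Serve` with the same handshake and plugin set.

```go
package main

import (
	"fmt"
	"log"
	"net/rpc"
	"os/exec"

	"github.com/hashicorp/go-plugin"
)

// Step 1: the interface exposed to plugins (hypothetical).
type Greeter interface {
	Greet() string
}

// Step 2: client and server halves that carry Greeter over net/rpc.
type GreeterRPCClient struct{ client *rpc.Client }

func (g *GreeterRPCClient) Greet() string {
	var resp string
	if err := g.client.Call("Plugin.Greet", new(interface{}), &resp); err != nil {
		log.Fatal(err)
	}
	return resp
}

type GreeterRPCServer struct{ Impl Greeter }

func (s *GreeterRPCServer) Greet(args interface{}, resp *string) error {
	*resp = s.Impl.Greet()
	return nil
}

// Step 3: a plugin.Plugin that knows how to build both halves.
type GreeterPlugin struct{ Impl Greeter }

func (p *GreeterPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
	return &GreeterRPCServer{Impl: p.Impl}, nil
}

func (p *GreeterPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
	return &GreeterRPCClient{client: c}, nil
}

// The handshake must match between host and plugin (values are illustrative).
var handshake = plugin.HandshakeConfig{
	ProtocolVersion:  1,
	MagicCookieKey:   "BASIC_PLUGIN",
	MagicCookieValue: "hello",
}

// Step 5: the host launches the subprocess and dispenses the interface.
// (Step 4 lives in the plugin binary: plugin.Serve(&plugin.ServeConfig{...}).)
func main() {
	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig: handshake,
		Plugins:         plugin.PluginSet{"greeter": &GreeterPlugin{}},
		Cmd:             exec.Command("./greeter-plugin"), // hypothetical binary
	})
	defer client.Kill()

	rpcClient, err := client.Client() // starts the subprocess and connects
	if err != nil {
		log.Fatal(err)
	}
	raw, err := rpcClient.Dispense("greeter")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(raw.(Greeter).Greet())
}
```

A production host would typically also set `SecureConfig` (binary checksum verification) and `AutoMTLS`, both of which are implemented in the `client.go` added below.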
diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go
new file mode 100644
index 00000000000..2e86f6213e2
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/client.go
@@ -0,0 +1,1055 @@
+package plugin
+
+import (
+	"bufio"
+	"context"
+	"crypto/subtle"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+	"io/ioutil"
+	"net"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/hashicorp/go-hclog"
+	"google.golang.org/grpc"
+)
+
+// If this is 1, then we've called CleanupClients. This can be used
+// by plugin RPC implementations to change error behavior since you
+// can expect network connection errors at this point. This should be
+// read by using sync/atomic.
+var Killed uint32 = 0
+
+// This is a slice of the "managed" clients which are cleaned up when
+// calling Cleanup
+var managedClients = make([]*Client, 0, 5)
+var managedClientsLock sync.Mutex
+
+// Error types
+var (
+	// ErrProcessNotFound is returned when a client is instantiated to
+	// reattach to an existing process and it isn't found.
+	ErrProcessNotFound = errors.New("Reattachment process not found")
+
+	// ErrChecksumsDoNotMatch is returned when binary's checksum doesn't match
+	// the one provided in the SecureConfig.
+	ErrChecksumsDoNotMatch = errors.New("checksums did not match")
+
+	// ErrSecureConfigNoChecksum is returned when an empty checksum is provided
+	// to the SecureConfig.
+	ErrSecureConfigNoChecksum = errors.New("no checksum provided")
+
+	// ErrSecureConfigNoHash is returned when a nil Hash object is provided to
+	// the SecureConfig.
+	ErrSecureConfigNoHash = errors.New("no hash implementation provided")
+
+	// ErrSecureConfigAndReattach is returned when both Reattach and
+	// SecureConfig are set.
+	ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set")
+)
+
+// Client handles the lifecycle of a plugin application. It launches
+// plugins, connects to them, dispenses interface implementations, and handles
+// killing the process.
+//
+// Plugin hosts should use one Client for each plugin executable. To
+// dispense a plugin type, use the `Client.Client` function, and then
+// call `Dispense`. This awkward API is mostly historical but is used to split
+// the client that deals with subprocess management and the client that
+// does RPC management.
+//
+// See NewClient and ClientConfig for using a Client.
+type Client struct {
+	config            *ClientConfig
+	exited            bool
+	l                 sync.Mutex
+	address           net.Addr
+	process           *os.Process
+	client            ClientProtocol
+	protocol          Protocol
+	logger            hclog.Logger
+	doneCtx           context.Context
+	ctxCancel         context.CancelFunc
+	negotiatedVersion int
+
+	// clientWaitGroup is used to manage the lifecycle of the plugin management
+	// goroutines.
+	clientWaitGroup sync.WaitGroup
+
+	// stderrWaitGroup is used to prevent the command's Wait() function from
+	// being called before we've finished reading from the stderr pipe.
+	stderrWaitGroup sync.WaitGroup
+
+	// processKilled is used for testing only, to flag when the process was
+	// forcefully killed.
+	processKilled bool
+}
+
+// NegotiatedVersion returns the protocol version negotiated with the server.
+// This is only valid after Start() is called.
+func (c *Client) NegotiatedVersion() int {
+	return c.negotiatedVersion
+}
+
+// ClientConfig is the configuration used to initialize a new
+// plugin client.
After being used to initialize a plugin client,
+// that configuration must not be modified again.
+type ClientConfig struct {
+	// HandshakeConfig is the configuration that must match servers.
+	HandshakeConfig
+
+	// Plugins are the plugins that can be consumed.
+	// The implied version of this PluginSet is the Handshake.ProtocolVersion.
+	Plugins PluginSet
+
+	// VersionedPlugins is a map of PluginSets for specific protocol versions.
+	// These can be used to negotiate a compatible version between client and
+	// server. If this is set, Handshake.ProtocolVersion is not required.
+	VersionedPlugins map[int]PluginSet
+
+	// One of the following must be set, but not both.
+	//
+	// Cmd is the unstarted subprocess for starting the plugin. If this is
+	// set, then the Client starts the plugin process on its own and connects
+	// to it.
+	//
+	// Reattach is configuration for reattaching to an existing plugin process
+	// that is already running. This isn't common.
+	Cmd      *exec.Cmd
+	Reattach *ReattachConfig
+
+	// SecureConfig is configuration for verifying the integrity of the
+	// executable. It can not be used with Reattach.
+	SecureConfig *SecureConfig
+
+	// TLSConfig is used to enable TLS on the RPC client.
+	TLSConfig *tls.Config
+
+	// Managed represents if the client should be managed by the
+	// plugin package or not. If true, then by calling CleanupClients,
+	// it will automatically be cleaned up. Otherwise, the client
+	// user is fully responsible for making sure to Kill all plugin
+	// clients. By default the client is _not_ managed.
+	Managed bool
+
+	// The minimum and maximum port to use for communicating with
+	// the subprocess. If not set, this defaults to 10,000 and 25,000
+	// respectively.
+	MinPort, MaxPort uint
+
+	// StartTimeout is the timeout to wait for the plugin to say it
+	// has started successfully.
+	StartTimeout time.Duration
+
+	// If non-nil, then the stderr of the client will be written to here
+	// (as well as the log). This is the original os.Stderr of the subprocess.
+	// This isn't the output of synced stderr.
+	Stderr io.Writer
+
+	// SyncStdout, SyncStderr can be set to override the
+	// respective os.Std* values in the plugin. Care should be taken to
+	// avoid races here. If these are nil, then this will be set to
+	// ioutil.Discard.
+	SyncStdout io.Writer
+	SyncStderr io.Writer
+
+	// AllowedProtocols is a list of allowed protocols. If this isn't set,
+	// then only netrpc is allowed. This is so that older go-plugin systems
+	// can show friendly errors if they see a plugin with an unknown
+	// protocol.
+	//
+	// By setting this, you can cause an error immediately on plugin start
+	// if an unsupported protocol is used with a good error message.
+	//
+	// If this isn't set at all (nil value), then only net/rpc is accepted.
+	// This is done for legacy reasons. You must explicitly opt-in to
+	// new protocols.
+	AllowedProtocols []Protocol
+
+	// Logger is the logger that the client will use. If none is provided,
+	// it will default to hclog's default logger.
+	Logger hclog.Logger
+
+	// AutoMTLS has the client and server automatically negotiate mTLS for
+	// transport authentication. This ensures that only the original client will
+	// be allowed to connect to the server, and all other connections will be
+	// rejected. The client will also refuse to connect to any server that isn't
+	// the original instance started by the client.
+	//
+	// In this mode of operation, the client generates a one-time use tls
+	// certificate, sends the public x.509 certificate to the new server, and
+	// the server generates a one-time use tls certificate, and sends the public
+	// x.509 certificate back to the client. These are used to authenticate all
+	// rpc connections between the client and server.
+	//
+	// Setting AutoMTLS to true implies that the server must support the
+	// protocol, and correctly negotiate the tls certificates, or a connection
+	// failure will result.
+	//
+	// The client should not set TLSConfig, nor should the server set a
+	// TLSProvider, because AutoMTLS implies that a new certificate and tls
+	// configuration will be generated at startup.
+	//
+	// You cannot Reattach to a server with this option enabled.
+	AutoMTLS bool
+
+	// GRPCDialOptions allows plugin users to pass custom grpc.DialOption
+	// to create gRPC connections. This only affects plugins using the gRPC
+	// protocol.
+	GRPCDialOptions []grpc.DialOption
+}
+
+// ReattachConfig is used to configure a client to reattach to an
+// already-running plugin process. You can retrieve this information by
+// calling ReattachConfig on Client.
+type ReattachConfig struct {
+	Protocol        Protocol
+	ProtocolVersion int
+	Addr            net.Addr
+	Pid             int
+
+	// Test is set to true if this is reattaching to a plugin in "test mode"
+	// (see ServeConfig.Test). In this mode, client.Kill will NOT kill the
+	// process and instead will rely on the plugin to terminate itself. This
+	// should not be used in non-test environments.
+	Test bool
+}
+
+// SecureConfig is used to configure a client to verify the integrity of an
+// executable before running. It does this by verifying the checksum is
+// expected. Hash is used to specify the hashing method to use when checksumming
+// the file. The configuration is verified by the client by calling the
+// SecureConfig.Check() function.
+//
+// The host process should ensure the checksum was provided by a trusted and
+// authoritative source. The binary should be installed in such a way that it
+// can not be modified by an unauthorized user between the time of this check
+// and the time of execution.
+type SecureConfig struct {
+	Checksum []byte
+	Hash     hash.Hash
+}
+
+// Check takes the filepath to an executable and returns true if the checksum of
+// the file matches the checksum provided in the SecureConfig.
+func (s *SecureConfig) Check(filePath string) (bool, error) {
+	if len(s.Checksum) == 0 {
+		return false, ErrSecureConfigNoChecksum
+	}
+
+	if s.Hash == nil {
+		return false, ErrSecureConfigNoHash
+	}
+
+	file, err := os.Open(filePath)
+	if err != nil {
+		return false, err
+	}
+	defer file.Close()
+
+	_, err = io.Copy(s.Hash, file)
+	if err != nil {
+		return false, err
+	}
+
+	sum := s.Hash.Sum(nil)
+
+	return subtle.ConstantTimeCompare(sum, s.Checksum) == 1, nil
+}
+
+// This makes sure all the managed subprocesses are killed and properly
+// logged. This should be called before the parent process running the
+// plugins exits.
+//
+// This must only be called _once_.
+func CleanupClients() {
+	// Set the killed to true so that we don't get unexpected panics
+	atomic.StoreUint32(&Killed, 1)
+
+	// Kill all the managed clients in parallel and use a WaitGroup
+	// to wait for them all to finish up.
+	var wg sync.WaitGroup
+	managedClientsLock.Lock()
+	for _, client := range managedClients {
+		wg.Add(1)
+
+		go func(client *Client) {
+			client.Kill()
+			wg.Done()
+		}(client)
+	}
+	managedClientsLock.Unlock()
+
+	wg.Wait()
+}
+
+// Creates a new plugin client which manages the lifecycle of an external
+// plugin and gets the address for the RPC connection.
+//
+// The client must be cleaned up at some point by calling Kill(). If
+// the client is a managed client (created with NewManagedClient) you
+// can just call CleanupClients at the end of your program and they will
+// be properly cleaned.
+func NewClient(config *ClientConfig) (c *Client) {
+	if config.MinPort == 0 && config.MaxPort == 0 {
+		config.MinPort = 10000
+		config.MaxPort = 25000
+	}
+
+	if config.StartTimeout == 0 {
+		config.StartTimeout = 1 * time.Minute
+	}
+
+	if config.Stderr == nil {
+		config.Stderr = ioutil.Discard
+	}
+
+	if config.SyncStdout == nil {
+		config.SyncStdout = ioutil.Discard
+	}
+	if config.SyncStderr == nil {
+		config.SyncStderr = ioutil.Discard
+	}
+
+	if config.AllowedProtocols == nil {
+		config.AllowedProtocols = []Protocol{ProtocolNetRPC}
+	}
+
+	if config.Logger == nil {
+		config.Logger = hclog.New(&hclog.LoggerOptions{
+			Output: hclog.DefaultOutput,
+			Level:  hclog.Trace,
+			Name:   "plugin",
+		})
+	}
+
+	c = &Client{
+		config: config,
+		logger: config.Logger,
+	}
+	if config.Managed {
+		managedClientsLock.Lock()
+		managedClients = append(managedClients, c)
+		managedClientsLock.Unlock()
+	}
+
+	return
+}
+
+// Client returns the protocol client for this connection.
+//
+// Subsequent calls to this will return the same client.
+func (c *Client) Client() (ClientProtocol, error) {
+	_, err := c.Start()
+	if err != nil {
+		return nil, err
+	}
+
+	c.l.Lock()
+	defer c.l.Unlock()
+
+	if c.client != nil {
+		return c.client, nil
+	}
+
+	switch c.protocol {
+	case ProtocolNetRPC:
+		c.client, err = newRPCClient(c)
+
+	case ProtocolGRPC:
+		c.client, err = newGRPCClient(c.doneCtx, c)
+
+	default:
+		return nil, fmt.Errorf("unknown server protocol: %s", c.protocol)
+	}
+
+	if err != nil {
+		c.client = nil
+		return nil, err
+	}
+
+	return c.client, nil
+}
+
+// Tells whether or not the underlying process has exited.
+func (c *Client) Exited() bool {
+	c.l.Lock()
+	defer c.l.Unlock()
+	return c.exited
+}
+
+// killed is used in tests to check if a process failed to exit gracefully, and
+// needed to be killed.
+func (c *Client) killed() bool {
+	c.l.Lock()
+	defer c.l.Unlock()
+	return c.processKilled
+}
+
+// End the executing subprocess (if it is running) and perform any cleanup
+// tasks necessary such as capturing any remaining logs and so on.
+//
+// This method blocks until the process successfully exits.
+//
+// This method can safely be called multiple times.
+func (c *Client) Kill() {
+	// Grab a lock to read some private fields.
+	c.l.Lock()
+	process := c.process
+	addr := c.address
+	c.l.Unlock()
+
+	// If there is no process, there is nothing to kill.
+	if process == nil {
+		return
+	}
+
+	defer func() {
+		// Wait for all the client goroutines to finish.
+		c.clientWaitGroup.Wait()
+
+		// Make sure there is no reference to the old process after it has been
+		// killed.
+		c.l.Lock()
+		c.process = nil
+		c.l.Unlock()
+	}()
+
+	// We need to check for address here. It is possible that the plugin
+	// started (process != nil) but has no address (addr == nil) if the
+	// plugin failed at startup. If we do have an address, we need to close
+	// the plugin net connections.
+	graceful := false
+	if addr != nil {
+		// Close the client to cleanly exit the process.
+		client, err := c.Client()
+		if err == nil {
+			err = client.Close()
+
+			// If there is no error, then we attempt to wait for a graceful
+			// exit. If there was an error, we assume that graceful cleanup
+			// won't happen and just force kill.
+			graceful = err == nil
+			if err != nil {
+				// If there was an error just log it. We're going to force
+				// kill in a moment anyways.
+				c.logger.Warn("error closing client during Kill", "err", err)
+			}
+		} else {
+			c.logger.Error("client", "error", err)
+		}
+	}
+
+	// If we're attempting a graceful exit, then we wait for a short period
+	// of time to allow that to happen. To wait for this we just wait on the
+	// doneCh which would be closed if the process exits.
+	if graceful {
+		select {
+		case <-c.doneCtx.Done():
+			c.logger.Debug("plugin exited")
+			return
+		case <-time.After(2 * time.Second):
+		}
+	}
+
+	// If graceful exiting failed, just kill it
+	c.logger.Warn("plugin failed to exit gracefully")
+	process.Kill()
+
+	c.l.Lock()
+	c.processKilled = true
+	c.l.Unlock()
+}
+
+// Starts the underlying subprocess, communicating with it to negotiate
+// a port for RPC connections, and returning the address to connect via RPC.
+//
+// This method is safe to call multiple times. Subsequent calls have no effect.
+// Once a client has been started once, it cannot be started again, even if
+// it was killed.
+func (c *Client) Start() (addr net.Addr, err error) {
+	c.l.Lock()
+	defer c.l.Unlock()
+
+	if c.address != nil {
+		return c.address, nil
+	}
+
+	// If one of cmd or reattach isn't set, then it is an error. We wrap
+	// this in a {} for scoping reasons, hoping that the escape
+	// analysis will pop the stack here.
+	{
+		cmdSet := c.config.Cmd != nil
+		attachSet := c.config.Reattach != nil
+		secureSet := c.config.SecureConfig != nil
+		if cmdSet == attachSet {
+			return nil, fmt.Errorf("Only one of Cmd or Reattach must be set")
+		}
+
+		if secureSet && attachSet {
+			return nil, ErrSecureConfigAndReattach
+		}
+	}
+
+	if c.config.Reattach != nil {
+		return c.reattach()
+	}
+
+	if c.config.VersionedPlugins == nil {
+		c.config.VersionedPlugins = make(map[int]PluginSet)
+	}
+
+	// handle all plugins as versioned, using the handshake config as the default.
+	version := int(c.config.ProtocolVersion)
+
+	// Make sure we're not overwriting a real version 0. If ProtocolVersion was
+	// non-zero, then we have to just assume the user made sure that
+	// VersionedPlugins doesn't conflict.
+	if _, ok := c.config.VersionedPlugins[version]; !ok && c.config.Plugins != nil {
+		c.config.VersionedPlugins[version] = c.config.Plugins
+	}
+
+	var versionStrings []string
+	for v := range c.config.VersionedPlugins {
+		versionStrings = append(versionStrings, strconv.Itoa(v))
+	}
+
+	env := []string{
+		fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue),
+		fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort),
+		fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort),
+		fmt.Sprintf("PLUGIN_PROTOCOL_VERSIONS=%s", strings.Join(versionStrings, ",")),
+	}
+
+	cmd := c.config.Cmd
+	cmd.Env = append(cmd.Env, os.Environ()...)
+	cmd.Env = append(cmd.Env, env...)
+	cmd.Stdin = os.Stdin
+
+	cmdStdout, err := cmd.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+	cmdStderr, err := cmd.StderrPipe()
+	if err != nil {
+		return nil, err
+	}
+
+	if c.config.SecureConfig == nil {
+		c.logger.Warn("plugin configured with a nil SecureConfig")
+	} else {
+		if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil {
+			return nil, fmt.Errorf("error verifying checksum: %s", err)
+		} else if !ok {
+			return nil, ErrChecksumsDoNotMatch
+		}
+	}
+
+	// Set up a temporary certificate for client/server mTLS, and send the
+	// public certificate to the plugin.
+	if c.config.AutoMTLS {
+		c.logger.Info("configuring client automatic mTLS")
+		certPEM, keyPEM, err := generateCert()
+		if err != nil {
+			c.logger.Error("failed to generate client certificate", "error", err)
+			return nil, err
+		}
+		cert, err := tls.X509KeyPair(certPEM, keyPEM)
+		if err != nil {
+			c.logger.Error("failed to parse client certificate", "error", err)
+			return nil, err
+		}
+
+		cmd.Env = append(cmd.Env, fmt.Sprintf("PLUGIN_CLIENT_CERT=%s", certPEM))
+
+		c.config.TLSConfig = &tls.Config{
+			Certificates: []tls.Certificate{cert},
+			ClientAuth:   tls.RequireAndVerifyClientCert,
+			MinVersion:   tls.VersionTLS12,
+			ServerName:   "localhost",
+		}
+	}
+
+	c.logger.Debug("starting plugin", "path", cmd.Path, "args", cmd.Args)
+	err = cmd.Start()
+	if err != nil {
+		return
+	}
+
+	// Set the process
+	c.process = cmd.Process
+	c.logger.Debug("plugin started", "path", cmd.Path, "pid", c.process.Pid)
+
+	// Make sure the command is properly cleaned up if there is an error
+	defer func() {
+		r := recover()
+
+		if err != nil || r != nil {
+			cmd.Process.Kill()
+		}
+
+		if r != nil {
+			panic(r)
+		}
+	}()
+
+	// Create a context for when we kill
+	c.doneCtx, c.ctxCancel = context.WithCancel(context.Background())
+
+	// Start a goroutine that logs the stderr
+	c.clientWaitGroup.Add(1)
+	c.stderrWaitGroup.Add(1)
+	// logStderr calls Done()
+	go c.logStderr(cmdStderr)
+
+	c.clientWaitGroup.Add(1)
+	go func() {
+		// ensure the context is cancelled when we're done
+		defer c.ctxCancel()
+
+		defer c.clientWaitGroup.Done()
+
+		// get the cmd info early, since the process information will be removed
+		// in Kill.
+		pid := c.process.Pid
+		path := cmd.Path
+
+		// wait to finish reading from stderr since the stderr pipe reader
+		// will be closed by the subsequent call to cmd.Wait().
+		c.stderrWaitGroup.Wait()
+
+		// Wait for the command to end.
+		err := cmd.Wait()
+
+		msgArgs := []interface{}{
+			"path", path,
+			"pid", pid,
+		}
+		if err != nil {
+			msgArgs = append(msgArgs,
+				[]interface{}{"error", err.Error()}...)
+			c.logger.Error("plugin process exited", msgArgs...)
+		} else {
+			// Log and make sure to flush the logs right away
+			c.logger.Info("plugin process exited", msgArgs...)
+		}
+
+		os.Stderr.Sync()
+
+		// Set that we exited, which takes a lock
+		c.l.Lock()
+		defer c.l.Unlock()
+		c.exited = true
+	}()
+
+	// Start a goroutine that is going to be reading the lines
+	// out of stdout
+	linesCh := make(chan string)
+	c.clientWaitGroup.Add(1)
+	go func() {
+		defer c.clientWaitGroup.Done()
+		defer close(linesCh)
+
+		scanner := bufio.NewScanner(cmdStdout)
+		for scanner.Scan() {
+			linesCh <- scanner.Text()
+		}
+	}()
+
+	// Make sure after we exit we read the lines from stdout forever
+	// so they don't block since it is a pipe.
+	// The scanner goroutine above will close this, but track it with a wait
+	// group for completeness.
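+
+	// Editor's note (not part of the vendored source): the handshake line
+	// parsed in the select below is pipe-delimited. Judging from the
+	// SplitN(line, "|", 6) call and how the parts are consumed, its fields
+	// are:
+	//
+	//	CORE-PROTOCOL-VERSION | APP-PROTOCOL-VERSION | NETWORK-TYPE | NETWORK-ADDR | PROTOCOL | TLS-CERT
+	//
+	// so a gRPC plugin on TCP might emit something like:
+	//
+	//	1|1|tcp|127.0.0.1:1234|grpc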
+ c.clientWaitGroup.Add(1) + defer func() { + go func() { + defer c.clientWaitGroup.Done() + for range linesCh { + } + }() + }() + + // Some channels for the next step + timeout := time.After(c.config.StartTimeout) + + // Start looking for the address + c.logger.Debug("waiting for RPC address", "path", cmd.Path) + select { + case <-timeout: + err = errors.New("timeout while waiting for plugin to start") + case <-c.doneCtx.Done(): + err = errors.New("plugin exited before we could connect") + case line := <-linesCh: + // Trim the line and split by "|" in order to get the parts of + // the output. + line = strings.TrimSpace(line) + parts := strings.SplitN(line, "|", 6) + if len(parts) < 4 { + err = fmt.Errorf( + "Unrecognized remote plugin message: %s\n\n"+ + "This usually means that the plugin is either invalid or simply\n"+ + "needs to be recompiled to support the latest protocol.", line) + return + } + + // Check the core protocol. Wrapped in a {} for scoping. + { + var coreProtocol int + coreProtocol, err = strconv.Atoi(parts[0]) + if err != nil { + err = fmt.Errorf("Error parsing core protocol version: %s", err) + return + } + + if coreProtocol != CoreProtocolVersion { + err = fmt.Errorf("Incompatible core API version with plugin. "+ + "Plugin version: %s, Core version: %d\n\n"+ + "To fix this, the plugin usually only needs to be recompiled.\n"+ + "Please report this to the plugin author.", parts[0], CoreProtocolVersion) + return + } + } + + // Test the API version + version, pluginSet, err := c.checkProtoVersion(parts[1]) + if err != nil { + return addr, err + } + + // set the Plugins value to the compatible set, so the version + // doesn't need to be passed through to the ClientProtocol + // implementation. + c.config.Plugins = pluginSet + c.negotiatedVersion = version + c.logger.Debug("using plugin", "version", version) + + switch parts[2] { + case "tcp": + addr, err = net.ResolveTCPAddr("tcp", parts[3]) + case "unix": + addr, err = net.ResolveUnixAddr("unix", parts[3]) + default: + err = fmt.Errorf("Unknown address type: %s", parts[3]) + } + + // If we have a server type, then record that. We default to net/rpc + // for backwards compatibility. + c.protocol = ProtocolNetRPC + if len(parts) >= 5 { + c.protocol = Protocol(parts[4]) + } + + found := false + for _, p := range c.config.AllowedProtocols { + if p == c.protocol { + found = true + break + } + } + if !found { + err = fmt.Errorf("Unsupported plugin protocol %q. Supported: %v", + c.protocol, c.config.AllowedProtocols) + return addr, err + } + + // See if we have a TLS certificate from the server. + // Checking if the length is > 50 rules out catching the unused "extra" + // data returned from some older implementations. + if len(parts) >= 6 && len(parts[5]) > 50 { + err := c.loadServerCert(parts[5]) + if err != nil { + return nil, fmt.Errorf("error parsing server cert: %s", err) + } + } + } + + c.address = addr + return +} + +// loadServerCert is used by AutoMTLS to read an x.509 cert returned by the +// server, and load it as the RootCA and ClientCA for the client TLSConfig. 
+func (c *Client) loadServerCert(cert string) error {
+	certPool := x509.NewCertPool()
+
+	asn1, err := base64.RawStdEncoding.DecodeString(cert)
+	if err != nil {
+		return err
+	}
+
+	x509Cert, err := x509.ParseCertificate([]byte(asn1))
+	if err != nil {
+		return err
+	}
+
+	certPool.AddCert(x509Cert)
+
+	c.config.TLSConfig.RootCAs = certPool
+	c.config.TLSConfig.ClientCAs = certPool
+	return nil
+}
+
+func (c *Client) reattach() (net.Addr, error) {
+	// Verify the process still exists. If not, then it is an error
+	p, err := os.FindProcess(c.config.Reattach.Pid)
+	if err != nil {
+		// On Unix systems, FindProcess never returns an error.
+		// On Windows, for non-existent pids it returns:
+		// os.SyscallError - 'OpenProcess: the parameter is incorrect'
+		return nil, ErrProcessNotFound
+	}
+
+	// Attempt to connect to the addr since on Unix systems FindProcess
+	// doesn't actually return an error if it can't find the process.
+	conn, err := net.Dial(
+		c.config.Reattach.Addr.Network(),
+		c.config.Reattach.Addr.String())
+	if err != nil {
+		p.Kill()
+		return nil, ErrProcessNotFound
+	}
+	conn.Close()
+
+	// Create a context for when we kill
+	c.doneCtx, c.ctxCancel = context.WithCancel(context.Background())
+
+	c.clientWaitGroup.Add(1)
+	// Goroutine to mark exit status
+	go func(pid int) {
+		defer c.clientWaitGroup.Done()
+
+		// ensure the context is cancelled when we're done
+		defer c.ctxCancel()
+
+		// Wait for the process to die
+		pidWait(pid)
+
+		// Log so we can see it
+		c.logger.Debug("reattached plugin process exited")
+
+		// Mark it
+		c.l.Lock()
+		defer c.l.Unlock()
+		c.exited = true
+	}(p.Pid)
+
+	// Set the address and protocol
+	c.address = c.config.Reattach.Addr
+	c.protocol = c.config.Reattach.Protocol
+	if c.protocol == "" {
+		// Default the protocol to net/rpc for backwards compatibility
+		c.protocol = ProtocolNetRPC
+	}
+
+	if c.config.Reattach.Test {
+		c.negotiatedVersion = c.config.Reattach.ProtocolVersion
+	}
+
+	// If we're in test mode, we do NOT set the process. This avoids the
+	// process being killed (the only purpose we have for c.process), since
+	// in test mode the process is responsible for exiting on its own.
+	if !c.config.Reattach.Test {
+		c.process = p
+	}
+
+	return c.address, nil
+}
+
+// checkProtoVersion returns the negotiated version and PluginSet.
+// This returns an error if the server returned an incompatible protocol
+// version, or an invalid handshake response.
+func (c *Client) checkProtoVersion(protoVersion string) (int, PluginSet, error) {
+	serverVersion, err := strconv.Atoi(protoVersion)
+	if err != nil {
+		return 0, nil, fmt.Errorf("Error parsing protocol version %q: %s", protoVersion, err)
+	}
+
+	// record these for the error message
+	var clientVersions []int
+
+	// all versions, including the legacy ProtocolVersion, have been added to
+	// the versions set
+	for version, plugins := range c.config.VersionedPlugins {
+		clientVersions = append(clientVersions, version)
+
+		if serverVersion != version {
+			continue
+		}
+		return version, plugins, nil
+	}
+
+	return 0, nil, fmt.Errorf("Incompatible API version with plugin. "+
+		"Plugin version: %d, Client versions: %d", serverVersion, clientVersions)
+}
+
+// ReattachConfig returns the information that must be provided to NewClient
+// to reattach to the plugin process that this client started. This is
+// useful for plugins that detach from their parent process.
+//
+// If this returns nil then the process hasn't been started yet. Please
+// call Start or Client before calling this.
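+//
+// Editor's note, an illustrative sketch (not part of the vendored source):
+// the returned config can be handed back to NewClient later to reattach,
+// e.g.
+//
+//	rc := client.ReattachConfig()
+//	// ...persist rc, then from another process:
+//	reattached := plugin.NewClient(&plugin.ClientConfig{
+//		Reattach: rc,
+//		Plugins:  pluginMap, // hypothetical plugin set
+//	})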
+func (c *Client) ReattachConfig() *ReattachConfig {
+	c.l.Lock()
+	defer c.l.Unlock()
+
+	if c.address == nil {
+		return nil
+	}
+
+	if c.config.Cmd != nil && c.config.Cmd.Process == nil {
+		return nil
+	}
+
+	// If we connected via reattach, just return the information as-is
+	if c.config.Reattach != nil {
+		return c.config.Reattach
+	}
+
+	return &ReattachConfig{
+		Protocol: c.protocol,
+		Addr:     c.address,
+		Pid:      c.config.Cmd.Process.Pid,
+	}
+}
+
+// Protocol returns the protocol of the server on the remote end. This will
+// start the plugin process if it isn't already started. Errors from
+// starting the plugin are suppressed and ProtocolInvalid is returned. It
+// is recommended you call Start explicitly before calling Protocol to ensure
+// no errors occur.
+func (c *Client) Protocol() Protocol {
+	_, err := c.Start()
+	if err != nil {
+		return ProtocolInvalid
+	}
+
+	return c.protocol
+}
+
+func netAddrDialer(addr net.Addr) func(string, time.Duration) (net.Conn, error) {
+	return func(_ string, _ time.Duration) (net.Conn, error) {
+		// Connect to the client
+		conn, err := net.Dial(addr.Network(), addr.String())
+		if err != nil {
+			return nil, err
+		}
+		if tcpConn, ok := conn.(*net.TCPConn); ok {
+			// Make sure to set keep alive so that the connection doesn't die
+			tcpConn.SetKeepAlive(true)
+		}
+
+		return conn, nil
+	}
+}
+
+// dialer is compatible with grpc.WithDialer and creates the connection
+// to the plugin.
+func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) {
+	conn, err := netAddrDialer(c.address)("", timeout)
+	if err != nil {
+		return nil, err
+	}
+
+	// If we have a TLS config we wrap our connection. We only do this
+	// for net/rpc since gRPC uses its own mechanism for TLS.
+	if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil {
+		conn = tls.Client(conn, c.config.TLSConfig)
+	}
+
+	return conn, nil
+}
+
+var stdErrBufferSize = 64 * 1024
+
+func (c *Client) logStderr(r io.Reader) {
+	defer c.clientWaitGroup.Done()
+	defer c.stderrWaitGroup.Done()
+	l := c.logger.Named(filepath.Base(c.config.Cmd.Path))
+
+	reader := bufio.NewReaderSize(r, stdErrBufferSize)
+	// continuation indicates the previous line was a prefix
+	continuation := false
+
+	for {
+		line, isPrefix, err := reader.ReadLine()
+		switch {
+		case err == io.EOF:
+			return
+		case err != nil:
+			l.Error("reading plugin stderr", "error", err)
+			return
+		}
+
+		c.config.Stderr.Write(line)
+
+		// The line was longer than our max token size, so it's likely
+		// incomplete and won't unmarshal.
+ if isPrefix || continuation { + l.Debug(string(line)) + + // if we're finishing a continued line, add the newline back in + if !isPrefix { + c.config.Stderr.Write([]byte{'\n'}) + } + + continuation = isPrefix + continue + } + + c.config.Stderr.Write([]byte{'\n'}) + + entry, err := parseJSON(line) + // If output is not JSON format, print directly to Debug + if err != nil { + // Attempt to infer the desired log level from the commonly used + // string prefixes + switch line := string(line); { + case strings.HasPrefix(line, "[TRACE]"): + l.Trace(line) + case strings.HasPrefix(line, "[DEBUG]"): + l.Debug(line) + case strings.HasPrefix(line, "[INFO]"): + l.Info(line) + case strings.HasPrefix(line, "[WARN]"): + l.Warn(line) + case strings.HasPrefix(line, "[ERROR]"): + l.Error(line) + default: + l.Debug(line) + } + } else { + out := flattenKVPairs(entry.KVPairs) + + out = append(out, "timestamp", entry.Timestamp.Format(hclog.TimeFormat)) + switch hclog.LevelFromString(entry.Level) { + case hclog.Trace: + l.Trace(entry.Message, out...) + case hclog.Debug: + l.Debug(entry.Message, out...) + case hclog.Info: + l.Info(entry.Message, out...) + case hclog.Warn: + l.Warn(entry.Message, out...) + case hclog.Error: + l.Error(entry.Message, out...) + default: + // if there was no log level, it's likely this is unexpected + // json from something other than hclog, and we should output + // it verbatim. + l.Debug(string(line)) + } + } + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/discover.go b/vendor/github.com/hashicorp/go-plugin/discover.go new file mode 100644 index 00000000000..d22c566ed50 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/discover.go @@ -0,0 +1,28 @@ +package plugin + +import ( + "path/filepath" +) + +// Discover discovers plugins that are in a given directory. +// +// The directory doesn't need to be absolute. For example, "." will work fine. +// +// This currently assumes any file matching the glob is a plugin. +// In the future this may be smarter about checking that a file is +// executable and so on. +// +// TODO: test +func Discover(glob, dir string) ([]string, error) { + var err error + + // Make the directory absolute if it isn't already + if !filepath.IsAbs(dir) { + dir, err = filepath.Abs(dir) + if err != nil { + return nil, err + } + } + + return filepath.Glob(filepath.Join(dir, glob)) +} diff --git a/vendor/github.com/hashicorp/go-plugin/error.go b/vendor/github.com/hashicorp/go-plugin/error.go new file mode 100644 index 00000000000..22a7baa6a0d --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/error.go @@ -0,0 +1,24 @@ +package plugin + +// This is a type that wraps error types so that they can be messaged +// across RPC channels. Since "error" is an interface, we can't always +// gob-encode the underlying structure. This is a valid error interface +// implementer that we will push across. +type BasicError struct { + Message string +} + +// NewBasicError is used to create a BasicError. +// +// err is allowed to be nil. 
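+//
+// Editor's note, illustrative only (not part of the vendored source):
+//
+//	wrapped := plugin.NewBasicError(errors.New("boom"))
+//	fmt.Println(wrapped.Error())                  // "boom"
+//	fmt.Println(plugin.NewBasicError(nil) == nil) // true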
+func NewBasicError(err error) *BasicError {
+	if err == nil {
+		return nil
+	}
+
+	return &BasicError{err.Error()}
+}
+
+func (e *BasicError) Error() string {
+	return e.Message
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go
new file mode 100644
index 00000000000..daf142d1709
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go
@@ -0,0 +1,457 @@
+package plugin
+
+import (
+	"context"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"log"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/hashicorp/go-plugin/internal/plugin"
+
+	"github.com/oklog/run"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+)
+
+// streamer interface is used in the broker to send/receive connection
+// information.
+type streamer interface {
+	Send(*plugin.ConnInfo) error
+	Recv() (*plugin.ConnInfo, error)
+	Close()
+}
+
+// sendErr is used to pass errors back during a send.
+type sendErr struct {
+	i  *plugin.ConnInfo
+	ch chan error
+}
+
+// gRPCBrokerServer is used by the plugin to start a stream and to send
+// connection information to/from the plugin. Implements GRPCBrokerServer and
+// streamer interfaces.
+type gRPCBrokerServer struct {
+	// send is used to send connection info to the gRPC stream.
+	send chan *sendErr
+
+	// recv is used to receive connection info from the gRPC stream.
+	recv chan *plugin.ConnInfo
+
+	// quit closes down the stream.
+	quit chan struct{}
+
+	// o is used to ensure we close the quit channel only once.
+	o sync.Once
+}
+
+func newGRPCBrokerServer() *gRPCBrokerServer {
+	return &gRPCBrokerServer{
+		send: make(chan *sendErr),
+		recv: make(chan *plugin.ConnInfo),
+		quit: make(chan struct{}),
+	}
+}
+
+// StartStream implements the GRPCBrokerServer interface and will block until
+// the quit channel is closed or the context reports Done. The stream will pass
+// connection information to/from the client.
+func (s *gRPCBrokerServer) StartStream(stream plugin.GRPCBroker_StartStreamServer) error {
+	doneCh := stream.Context().Done()
+	defer s.Close()
+
+	// Process the send stream
+	go func() {
+		for {
+			select {
+			case <-doneCh:
+				return
+			case <-s.quit:
+				return
+			case se := <-s.send:
+				err := stream.Send(se.i)
+				se.ch <- err
+			}
+		}
+	}()
+
+	// Process the receive stream
+	for {
+		i, err := stream.Recv()
+		if err != nil {
+			return err
+		}
+		select {
+		case <-doneCh:
+			return nil
+		case <-s.quit:
+			return nil
+		case s.recv <- i:
+		}
+	}
+
+	return nil
+}
+
+// Send is used by the GRPCBroker to pass connection information into the stream
+// to the client.
+func (s *gRPCBrokerServer) Send(i *plugin.ConnInfo) error {
+	ch := make(chan error)
+	defer close(ch)
+
+	select {
+	case <-s.quit:
+		return errors.New("broker closed")
+	case s.send <- &sendErr{
+		i:  i,
+		ch: ch,
+	}:
+	}
+
+	return <-ch
+}
+
+// Recv is used by the GRPCBroker to pass connection information that has been
+// sent from the client from the stream to the broker.
+func (s *gRPCBrokerServer) Recv() (*plugin.ConnInfo, error) {
+	select {
+	case <-s.quit:
+		return nil, errors.New("broker closed")
+	case i := <-s.recv:
+		return i, nil
+	}
+}
+
+// Close closes the quit channel, shutting down the stream.
+func (s *gRPCBrokerServer) Close() {
+	s.o.Do(func() {
+		close(s.quit)
+	})
+}
+
+// gRPCBrokerClientImpl is used by the client to start a stream and to send
+// connection information to/from the client. Implements GRPCBrokerClient and
+// streamer interfaces.
+type gRPCBrokerClientImpl struct { + // client is the underlying GRPC client used to make calls to the server. + client plugin.GRPCBrokerClient + + // send is used to send connection info to the gRPC stream. + send chan *sendErr + + // recv is used to receive connection info from the gRPC stream. + recv chan *plugin.ConnInfo + + // quit closes down the stream. + quit chan struct{} + + // o is used to ensure we close the quit channel only once. + o sync.Once +} + +func newGRPCBrokerClient(conn *grpc.ClientConn) *gRPCBrokerClientImpl { + return &gRPCBrokerClientImpl{ + client: plugin.NewGRPCBrokerClient(conn), + send: make(chan *sendErr), + recv: make(chan *plugin.ConnInfo), + quit: make(chan struct{}), + } +} + +// StartStream implements the GRPCBrokerClient interface and will block until +// the quit channel is closed or the context reports Done. The stream will pass +// connection information to/from the plugin. +func (s *gRPCBrokerClientImpl) StartStream() error { + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + defer s.Close() + + stream, err := s.client.StartStream(ctx) + if err != nil { + return err + } + doneCh := stream.Context().Done() + + go func() { + for { + select { + case <-doneCh: + return + case <-s.quit: + return + case se := <-s.send: + err := stream.Send(se.i) + se.ch <- err + } + } + }() + + for { + i, err := stream.Recv() + if err != nil { + return err + } + select { + case <-doneCh: + return nil + case <-s.quit: + return nil + case s.recv <- i: + } + } + + return nil +} + +// Send is used by the GRPCBroker to pass connection information into the stream +// to the plugin. +func (s *gRPCBrokerClientImpl) Send(i *plugin.ConnInfo) error { + ch := make(chan error) + defer close(ch) + + select { + case <-s.quit: + return errors.New("broker closed") + case s.send <- &sendErr{ + i: i, + ch: ch, + }: + } + + return <-ch +} + +// Recv is used by the GRPCBroker to pass connection information that has been +// sent from the plugin to the broker. +func (s *gRPCBrokerClientImpl) Recv() (*plugin.ConnInfo, error) { + select { + case <-s.quit: + return nil, errors.New("broker closed") + case i := <-s.recv: + return i, nil + } +} + +// Close closes the quit channel, shutting down the stream. +func (s *gRPCBrokerClientImpl) Close() { + s.o.Do(func() { + close(s.quit) + }) +} + +// GRPCBroker is responsible for brokering connections by unique ID. +// +// It is used by plugins to create multiple gRPC connections and data +// streams between the plugin process and the host process. +// +// This allows a plugin to request a channel with a specific ID to connect to +// or accept a connection from, and the broker handles the details of +// holding these channels open while they're being negotiated. +// +// The Plugin interface has access to these for both Server and Client. +// The broker can be used by either (optionally) to reserve and connect to +// new streams. This is useful for complex args and return values, +// or anything else you might need a data stream for. 
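+//
+// Editor's note, an illustrative sketch of the pattern (the helper service
+// wired up here is hypothetical, not part of the vendored source): one side
+// reserves an ID and serves on it, sending the ID to the peer through an
+// ordinary RPC field so the peer can dial it:
+//
+//	id := broker.NextId()
+//	go broker.AcceptAndServe(id, func(opts []grpc.ServerOption) *grpc.Server {
+//		s := grpc.NewServer(opts...)
+//		// register a hypothetical helper service on s here
+//		return s
+//	})
+//	// ...the other side receives id and dials:
+//	conn, err := broker.Dial(id)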
+type GRPCBroker struct { + nextId uint32 + streamer streamer + streams map[uint32]*gRPCBrokerPending + tls *tls.Config + doneCh chan struct{} + o sync.Once + + sync.Mutex +} + +type gRPCBrokerPending struct { + ch chan *plugin.ConnInfo + doneCh chan struct{} +} + +func newGRPCBroker(s streamer, tls *tls.Config) *GRPCBroker { + return &GRPCBroker{ + streamer: s, + streams: make(map[uint32]*gRPCBrokerPending), + tls: tls, + doneCh: make(chan struct{}), + } +} + +// Accept accepts a connection by ID. +// +// This should not be called multiple times with the same ID at one time. +func (b *GRPCBroker) Accept(id uint32) (net.Listener, error) { + listener, err := serverListener() + if err != nil { + return nil, err + } + + err = b.streamer.Send(&plugin.ConnInfo{ + ServiceId: id, + Network: listener.Addr().Network(), + Address: listener.Addr().String(), + }) + if err != nil { + return nil, err + } + + return listener, nil +} + +// AcceptAndServe is used to accept a specific stream ID and immediately +// serve a gRPC server on that stream ID. This is used to easily serve +// complex arguments. Each AcceptAndServe call opens a new listener socket and +// sends the connection info down the stream to the dialer. Since a new +// connection is opened every call, these calls should be used sparingly. +// Multiple gRPC server implementations can be registered to a single +// AcceptAndServe call. +func (b *GRPCBroker) AcceptAndServe(id uint32, s func([]grpc.ServerOption) *grpc.Server) { + listener, err := b.Accept(id) + if err != nil { + log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) + return + } + defer listener.Close() + + var opts []grpc.ServerOption + if b.tls != nil { + opts = []grpc.ServerOption{grpc.Creds(credentials.NewTLS(b.tls))} + } + + server := s(opts) + + // Here we use a run group to close this goroutine if the server is shutdown + // or the broker is shutdown. + var g run.Group + { + // Serve on the listener, if shutting down call GracefulStop. + g.Add(func() error { + return server.Serve(listener) + }, func(err error) { + server.GracefulStop() + }) + } + { + // block on the closeCh or the doneCh. If we are shutting down close the + // closeCh. + closeCh := make(chan struct{}) + g.Add(func() error { + select { + case <-b.doneCh: + case <-closeCh: + } + return nil + }, func(err error) { + close(closeCh) + }) + } + + // Block until we are done + g.Run() +} + +// Close closes the stream and all servers. +func (b *GRPCBroker) Close() error { + b.streamer.Close() + b.o.Do(func() { + close(b.doneCh) + }) + return nil +} + +// Dial opens a connection by ID. +func (b *GRPCBroker) Dial(id uint32) (conn *grpc.ClientConn, err error) { + var c *plugin.ConnInfo + + // Open the stream + p := b.getStream(id) + select { + case c = <-p.ch: + close(p.doneCh) + case <-time.After(5 * time.Second): + return nil, fmt.Errorf("timeout waiting for connection info") + } + + var addr net.Addr + switch c.Network { + case "tcp": + addr, err = net.ResolveTCPAddr("tcp", c.Address) + case "unix": + addr, err = net.ResolveUnixAddr("unix", c.Address) + default: + err = fmt.Errorf("Unknown address type: %s", c.Address) + } + if err != nil { + return nil, err + } + + return dialGRPCConn(b.tls, netAddrDialer(addr)) +} + +// NextId returns a unique ID to use next. +// +// It is possible for very long-running plugin hosts to wrap this value, +// though it would require a very large amount of calls. In practice +// we've never seen it happen. 
+func (m *GRPCBroker) NextId() uint32 { + return atomic.AddUint32(&m.nextId, 1) +} + +// Run starts the brokering and should be executed in a goroutine, since it +// blocks forever, or until the session closes. +// +// Uses of GRPCBroker never need to call this. It is called internally by +// the plugin host/client. +func (m *GRPCBroker) Run() { + for { + stream, err := m.streamer.Recv() + if err != nil { + // Once we receive an error, just exit + break + } + + // Initialize the waiter + p := m.getStream(stream.ServiceId) + select { + case p.ch <- stream: + default: + } + + go m.timeoutWait(stream.ServiceId, p) + } +} + +func (m *GRPCBroker) getStream(id uint32) *gRPCBrokerPending { + m.Lock() + defer m.Unlock() + + p, ok := m.streams[id] + if ok { + return p + } + + m.streams[id] = &gRPCBrokerPending{ + ch: make(chan *plugin.ConnInfo, 1), + doneCh: make(chan struct{}), + } + return m.streams[id] +} + +func (m *GRPCBroker) timeoutWait(id uint32, p *gRPCBrokerPending) { + // Wait for the stream to either be picked up and connected, or + // for a timeout. + select { + case <-p.doneCh: + case <-time.After(5 * time.Second): + } + + m.Lock() + defer m.Unlock() + + // Delete the stream so no one else can grab it + delete(m.streams, id) +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go new file mode 100644 index 00000000000..842903c922b --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_client.go @@ -0,0 +1,126 @@ +package plugin + +import ( + "crypto/tls" + "fmt" + "math" + "net" + "time" + + "github.com/hashicorp/go-plugin/internal/plugin" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health/grpc_health_v1" +) + +func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, error), dialOpts ...grpc.DialOption) (*grpc.ClientConn, error) { + // Build dialing options. + opts := make([]grpc.DialOption, 0) + + // We use a custom dialer so that we can connect over unix domain sockets. + opts = append(opts, grpc.WithDialer(dialer)) + + // Fail right away + opts = append(opts, grpc.FailOnNonTempDialError(true)) + + // If we have no TLS configuration set, we need to explicitly tell grpc + // that we're connecting with an insecure connection. + if tls == nil { + opts = append(opts, grpc.WithInsecure()) + } else { + opts = append(opts, grpc.WithTransportCredentials( + credentials.NewTLS(tls))) + } + + opts = append(opts, + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)), + grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(math.MaxInt32))) + + // Add our custom options if we have any + opts = append(opts, dialOpts...) + + // Connect. Note the first parameter is unused because we use a custom + // dialer that has the state to see the address. + conn, err := grpc.Dial("unused", opts...) + if err != nil { + return nil, err + } + + return conn, nil +} + +// newGRPCClient creates a new GRPCClient. The Client argument is expected +// to be successfully started already with a lock held. +func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) { + conn, err := dialGRPCConn(c.config.TLSConfig, c.dialer, c.config.GRPCDialOptions...) + if err != nil { + return nil, err + } + + // Start the broker. 
+ brokerGRPCClient := newGRPCBrokerClient(conn) + broker := newGRPCBroker(brokerGRPCClient, c.config.TLSConfig) + go broker.Run() + go brokerGRPCClient.StartStream() + + // Start the stdio client + stdioClient, err := newGRPCStdioClient(doneCtx, c.logger.Named("stdio"), conn) + if err != nil { + return nil, err + } + go stdioClient.Run(c.config.SyncStdout, c.config.SyncStderr) + + cl := &GRPCClient{ + Conn: conn, + Plugins: c.config.Plugins, + doneCtx: doneCtx, + broker: broker, + controller: plugin.NewGRPCControllerClient(conn), + } + + return cl, nil +} + +// GRPCClient connects to a GRPCServer over gRPC to dispense plugin types. +type GRPCClient struct { + Conn *grpc.ClientConn + Plugins map[string]Plugin + + doneCtx context.Context + broker *GRPCBroker + + controller plugin.GRPCControllerClient +} + +// ClientProtocol impl. +func (c *GRPCClient) Close() error { + c.broker.Close() + c.controller.Shutdown(c.doneCtx, &plugin.Empty{}) + return c.Conn.Close() +} + +// ClientProtocol impl. +func (c *GRPCClient) Dispense(name string) (interface{}, error) { + raw, ok := c.Plugins[name] + if !ok { + return nil, fmt.Errorf("unknown plugin type: %s", name) + } + + p, ok := raw.(GRPCPlugin) + if !ok { + return nil, fmt.Errorf("plugin %q doesn't support gRPC", name) + } + + return p.GRPCClient(c.doneCtx, c.broker, c.Conn) +} + +// ClientProtocol impl. +func (c *GRPCClient) Ping() error { + client := grpc_health_v1.NewHealthClient(c.Conn) + _, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{ + Service: GRPCServiceName, + }) + + return err +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_controller.go b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go new file mode 100644 index 00000000000..1a8a8e70ea4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go @@ -0,0 +1,23 @@ +package plugin + +import ( + "context" + + "github.com/hashicorp/go-plugin/internal/plugin" +) + +// GRPCControllerServer handles shutdown calls to terminate the server when the +// plugin client is closed. +type grpcControllerServer struct { + server *GRPCServer +} + +// Shutdown stops the grpc server. It first will attempt a graceful stop, then a +// full stop on the server. +func (s *grpcControllerServer) Shutdown(ctx context.Context, _ *plugin.Empty) (*plugin.Empty, error) { + resp := &plugin.Empty{} + + // TODO: figure out why GracefullStop doesn't work. + s.server.Stop() + return resp, nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go new file mode 100644 index 00000000000..54b061cc365 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_server.go @@ -0,0 +1,161 @@ +package plugin + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "net" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/plugin" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/reflection" +) + +// GRPCServiceName is the name of the service that the health check should +// return as passing. +const GRPCServiceName = "plugin" + +// DefaultGRPCServer can be used with the "GRPCServer" field for Server +// as a default factory method to create a gRPC server with no extra options. 
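+//
+// Editor's note, illustrative only (ServeConfig wiring as commonly
+// documented for go-plugin; handshake and plugin set elided):
+//
+//	plugin.Serve(&plugin.ServeConfig{
+//		// HandshakeConfig, Plugins: ...
+//		GRPCServer: plugin.DefaultGRPCServer,
+//	})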
+func DefaultGRPCServer(opts []grpc.ServerOption) *grpc.Server {
+	return grpc.NewServer(opts...)
+}
+
+// GRPCServer is a ServerType implementation that serves plugins over
+// gRPC. This allows plugins to easily be written for other languages.
+//
+// The GRPCServer outputs a custom configuration as a base64-encoded
+// JSON structure represented by the GRPCServerConfig config structure.
+type GRPCServer struct {
+	// Plugins are the list of plugins to serve.
+	Plugins map[string]Plugin
+
+	// Server is the actual server that will accept connections. This
+	// will be used for plugin registration as well.
+	Server func([]grpc.ServerOption) *grpc.Server
+
+	// TLS should be the TLS configuration if available. If this is nil,
+	// the connection will not have transport security.
+	TLS *tls.Config
+
+	// DoneCh is the channel that is closed when this server has exited.
+	DoneCh chan struct{}
+
+	// Stdout/Stderr are the readers for stdout/stderr that will be copied
+	// to the stdout/stderr connection that is output.
+	Stdout io.Reader
+	Stderr io.Reader
+
+	config      GRPCServerConfig
+	server      *grpc.Server
+	broker      *GRPCBroker
+	stdioServer *grpcStdioServer
+
+	logger hclog.Logger
+}
+
+// ServerProtocol impl.
+func (s *GRPCServer) Init() error {
+	// Create our server
+	var opts []grpc.ServerOption
+	if s.TLS != nil {
+		opts = append(opts, grpc.Creds(credentials.NewTLS(s.TLS)))
+	}
+	s.server = s.Server(opts)
+
+	// Register the health service
+	healthCheck := health.NewServer()
+	healthCheck.SetServingStatus(
+		GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING)
+	grpc_health_v1.RegisterHealthServer(s.server, healthCheck)
+
+	// Register the reflection service
+	reflection.Register(s.server)
+
+	// Register the broker service
+	brokerServer := newGRPCBrokerServer()
+	plugin.RegisterGRPCBrokerServer(s.server, brokerServer)
+	s.broker = newGRPCBroker(brokerServer, s.TLS)
+	go s.broker.Run()
+
+	// Register the controller
+	controllerServer := &grpcControllerServer{server: s}
+	plugin.RegisterGRPCControllerServer(s.server, controllerServer)
+
+	// Register the stdio service
+	s.stdioServer = newGRPCStdioServer(s.logger, s.Stdout, s.Stderr)
+	plugin.RegisterGRPCStdioServer(s.server, s.stdioServer)
+
+	// Register all our plugins onto the gRPC server.
+	for k, raw := range s.Plugins {
+		p, ok := raw.(GRPCPlugin)
+		if !ok {
+			return fmt.Errorf("%q is not a GRPC-compatible plugin", k)
+		}
+
+		if err := p.GRPCServer(s.broker, s.server); err != nil {
+			return fmt.Errorf("error registering %q: %s", k, err)
+		}
+	}
+
+	return nil
+}
+
+// Stop calls Stop on the underlying grpc.Server and Close on the underlying
+// grpc.Broker if present.
+func (s *GRPCServer) Stop() {
+	s.server.Stop()
+
+	if s.broker != nil {
+		s.broker.Close()
+		s.broker = nil
+	}
+}
+
+// GracefulStop calls GracefulStop on the underlying grpc.Server and Close on
+// the underlying grpc.Broker if present.
+func (s *GRPCServer) GracefulStop() {
+	s.server.GracefulStop()
+
+	if s.broker != nil {
+		s.broker.Close()
+		s.broker = nil
+	}
+}
+
+// Config is the GRPCServerConfig encoded as JSON.
+func (s *GRPCServer) Config() string {
+	// Create a buffer that will contain our final contents
+	var buf bytes.Buffer
+
+	// Encode the configuration as JSON into the buffer.
+	if err := json.NewEncoder(&buf).Encode(s.config); err != nil {
+		// We panic since this shouldn't happen under any scenario. We
+		// carefully control the structure being encoded here and it should
+		// always be successful.
+		panic(err)
+	}
+
+	return buf.String()
+}
+
+func (s *GRPCServer) Serve(lis net.Listener) {
+	defer close(s.DoneCh)
+	err := s.server.Serve(lis)
+	if err != nil {
+		s.logger.Error("grpc server", "error", err)
+	}
+}
+
+// GRPCServerConfig is the extra configuration passed along for consumers
+// to facilitate using GRPC plugins.
+type GRPCServerConfig struct {
+	StdoutAddr string `json:"stdout_addr"`
+	StderrAddr string `json:"stderr_addr"`
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go
new file mode 100644
index 00000000000..a582181505f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go
@@ -0,0 +1,207 @@
+package plugin
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"io"
+
+	empty "github.com/golang/protobuf/ptypes/empty"
+	hclog "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-plugin/internal/plugin"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// grpcStdioBuffer is the buffer size we try to fill when sending a chunk of
+// stdio data. This is currently 1 KB for no reason other than that seems like
+// enough (stdio data isn't that common) and is fairly low.
+const grpcStdioBuffer = 1 * 1024
+
+// grpcStdioServer implements the Stdio service and streams stdout/stderr.
+type grpcStdioServer struct {
+	stdoutCh <-chan []byte
+	stderrCh <-chan []byte
+}
+
+// newGRPCStdioServer creates a new grpcStdioServer and starts the stream
+// copying for the given out and err readers.
+//
+// This must only be called ONCE per srcOut, srcErr.
+func newGRPCStdioServer(log hclog.Logger, srcOut, srcErr io.Reader) *grpcStdioServer {
+	stdoutCh := make(chan []byte)
+	stderrCh := make(chan []byte)
+
+	// Begin copying the streams
+	go copyChan(log, stdoutCh, srcOut)
+	go copyChan(log, stderrCh, srcErr)
+
+	// Construct our server
+	return &grpcStdioServer{
+		stdoutCh: stdoutCh,
+		stderrCh: stderrCh,
+	}
+}
+
+// StreamStdio streams our stdout/err as the response.
+func (s *grpcStdioServer) StreamStdio(
+	_ *empty.Empty,
+	srv plugin.GRPCStdio_StreamStdioServer,
+) error {
+	// Share the same data value between runs. Sending this over the wire
+	// marshals it so we can reuse this.
+	var data plugin.StdioData
+
+	for {
+		// Read our data
+		select {
+		case data.Data = <-s.stdoutCh:
+			data.Channel = plugin.StdioData_STDOUT
+
+		case data.Data = <-s.stderrCh:
+			data.Channel = plugin.StdioData_STDERR
+
+		case <-srv.Context().Done():
+			return nil
+		}
+
+		// Not sure if this is possible, but if we somehow got here and
+		// we didn't populate any data at all, then just continue.
+		if len(data.Data) == 0 {
+			continue
+		}
+
+		// Send our data to the client.
+		if err := srv.Send(&data); err != nil {
+			return err
+		}
+	}
+}
+
+// grpcStdioClient wraps the stdio service as a client to copy
+// the stdio data to output writers.
+type grpcStdioClient struct {
+	log         hclog.Logger
+	stdioClient plugin.GRPCStdio_StreamStdioClient
+}
+
+// newGRPCStdioClient creates a grpcStdioClient. This will perform the
+// initial connection to the stdio service. If the stdio service is unavailable
+// then this will be a no-op. This allows this to work without error for
+// plugins that don't support this.
+func newGRPCStdioClient(
+	ctx context.Context,
+	log hclog.Logger,
+	conn *grpc.ClientConn,
+) (*grpcStdioClient, error) {
+	client := plugin.NewGRPCStdioClient(conn)
+
+	// Connect immediately to the endpoint
+	stdioClient, err := client.StreamStdio(ctx, &empty.Empty{})
+
+	// If we get an Unavailable or Unimplemented error, this means that the plugin isn't
+	// updated and linked against a version of go-plugin that supports
+	// this. We fall back to the previous behavior of just not syncing anything.
+	if status.Code(err) == codes.Unavailable || status.Code(err) == codes.Unimplemented {
+		log.Warn("stdio service not available, stdout/stderr syncing unavailable")
+		stdioClient = nil
+		err = nil
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return &grpcStdioClient{
+		log:         log,
+		stdioClient: stdioClient,
+	}, nil
+}
+
+// Run starts the loop that receives stdio data and writes it to the given
+// writers. This blocks and should be run in a goroutine.
+func (c *grpcStdioClient) Run(stdout, stderr io.Writer) {
+	// This will be nil if stdio is not supported by the plugin
+	if c.stdioClient == nil {
+		c.log.Warn("stdio service unavailable, run will do nothing")
+		return
+	}
+
+	for {
+		c.log.Trace("waiting for stdio data")
+		data, err := c.stdioClient.Recv()
+		if err != nil {
+			if err == io.EOF ||
+				status.Code(err) == codes.Unavailable ||
+				status.Code(err) == codes.Canceled ||
+				status.Code(err) == codes.Unimplemented ||
+				err == context.Canceled {
+				c.log.Debug("received EOF, stopping recv loop", "err", err)
+				return
+			}
+
+			c.log.Error("error receiving data", "err", err)
+			return
+		}
+
+		// Determine our output writer based on channel
+		var w io.Writer
+		switch data.Channel {
+		case plugin.StdioData_STDOUT:
+			w = stdout
+
+		case plugin.StdioData_STDERR:
+			w = stderr
+
+		default:
+			c.log.Warn("unknown channel, dropping", "channel", data.Channel)
+			continue
+		}
+
+		// Write! In the event of an error we just continue.
+		if c.log.IsTrace() {
+			c.log.Trace("received data", "channel", data.Channel.String(), "len", len(data.Data))
+		}
+		if _, err := io.Copy(w, bytes.NewReader(data.Data)); err != nil {
+			c.log.Error("failed to copy all bytes", "err", err)
+		}
+	}
+}
+
+// copyChan copies an io.Reader into a channel.
+func copyChan(log hclog.Logger, dst chan<- []byte, src io.Reader) {
+	bufsrc := bufio.NewReader(src)
+
+	for {
+		// Make our data buffer. We allocate a new one per loop iteration
+		// so that we can send it over the channel.
+		var data [1024]byte
+
+		// Read the data, this will block until data is available
+		n, err := bufsrc.Read(data[:])
+
+		// We have to check if we have data BEFORE err != nil. The bufio
+		// docs guarantee n == 0 on EOF, but it's better to be safe here.
+		if n > 0 {
+			// We have data! Send it on the channel. This will block if there
+			// is no reader on the other side. We expect that go-plugin will
+			// connect immediately to the stdio server to drain this so we want
+			// this block to happen for backpressure.
+			dst <- data[:n]
+		}
+
+		// If we hit EOF we're done copying
+		if err == io.EOF {
+			log.Debug("stdio EOF, exiting copy loop")
+			return
+		}
+
+		// Any other error we just exit the loop. We don't expect there to
+		// be errors since our use case for this is reading/writing from
+		// an in-process pipe (os.Pipe).
+ if err != nil { + log.Warn("error copying stdio data, stopping copy", "err", err) + return + } + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go new file mode 100644 index 00000000000..fb9d415254f --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc -I ./ ./grpc_broker.proto ./grpc_controller.proto ./grpc_stdio.proto --go_out=plugins=grpc:. + +package plugin diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go new file mode 100644 index 00000000000..6bf103859f8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go @@ -0,0 +1,203 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_broker.proto + +package plugin + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ConnInfo struct { + ServiceId uint32 `protobuf:"varint,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"` + Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnInfo) Reset() { *m = ConnInfo{} } +func (m *ConnInfo) String() string { return proto.CompactTextString(m) } +func (*ConnInfo) ProtoMessage() {} +func (*ConnInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_grpc_broker_3322b07398605250, []int{0} +} +func (m *ConnInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConnInfo.Unmarshal(m, b) +} +func (m *ConnInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConnInfo.Marshal(b, m, deterministic) +} +func (dst *ConnInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnInfo.Merge(dst, src) +} +func (m *ConnInfo) XXX_Size() int { + return xxx_messageInfo_ConnInfo.Size(m) +} +func (m *ConnInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ConnInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnInfo proto.InternalMessageInfo + +func (m *ConnInfo) GetServiceId() uint32 { + if m != nil { + return m.ServiceId + } + return 0 +} + +func (m *ConnInfo) GetNetwork() string { + if m != nil { + return m.Network + } + return "" +} + +func (m *ConnInfo) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func init() { + proto.RegisterType((*ConnInfo)(nil), "plugin.ConnInfo") +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GRPCBrokerClient is the client API for GRPCBroker service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GRPCBrokerClient interface { + StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) +} + +type gRPCBrokerClient struct { + cc *grpc.ClientConn +} + +func NewGRPCBrokerClient(cc *grpc.ClientConn) GRPCBrokerClient { + return &gRPCBrokerClient{cc} +} + +func (c *gRPCBrokerClient) StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_GRPCBroker_serviceDesc.Streams[0], "/plugin.GRPCBroker/StartStream", opts...) + if err != nil { + return nil, err + } + x := &gRPCBrokerStartStreamClient{stream} + return x, nil +} + +type GRPCBroker_StartStreamClient interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ClientStream +} + +type gRPCBrokerStartStreamClient struct { + grpc.ClientStream +} + +func (x *gRPCBrokerStartStreamClient) Send(m *ConnInfo) error { + return x.ClientStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamClient) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GRPCBrokerServer is the server API for GRPCBroker service. +type GRPCBrokerServer interface { + StartStream(GRPCBroker_StartStreamServer) error +} + +func RegisterGRPCBrokerServer(s *grpc.Server, srv GRPCBrokerServer) { + s.RegisterService(&_GRPCBroker_serviceDesc, srv) +} + +func _GRPCBroker_StartStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(GRPCBrokerServer).StartStream(&gRPCBrokerStartStreamServer{stream}) +} + +type GRPCBroker_StartStreamServer interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ServerStream +} + +type gRPCBrokerStartStreamServer struct { + grpc.ServerStream +} + +func (x *gRPCBrokerStartStreamServer) Send(m *ConnInfo) error { + return x.ServerStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamServer) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _GRPCBroker_serviceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCBroker", + HandlerType: (*GRPCBrokerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StartStream", + Handler: _GRPCBroker_StartStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc_broker.proto", +} + +func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor_grpc_broker_3322b07398605250) } + +var fileDescriptor_grpc_broker_3322b07398605250 = []byte{ + // 175 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, + 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x8a, 0xe5, 0xe2, 0x70, 0xce, 0xcf, 0xcb, 0xf3, 0xcc, 0x4b, + 0xcb, 0x17, 0x92, 0xe5, 0xe2, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x8d, 0xcf, 0x4c, 0x91, + 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x84, 0x8a, 0x78, 0xa6, 0x08, 0x49, 
0x70, 0xb1, 0xe7, + 0xa5, 0x96, 0x94, 0xe7, 0x17, 0x65, 0x4b, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x20, + 0x99, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x09, 0x66, 0x88, 0x0c, 0x94, 0x6b, 0xe4, 0xcc, + 0xc5, 0xe5, 0x1e, 0x14, 0xe0, 0xec, 0x04, 0xb6, 0x5a, 0xc8, 0x94, 0x8b, 0x3b, 0xb8, 0x24, 0xb1, + 0xa8, 0x24, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x57, 0x48, 0x40, 0x0f, 0xe2, 0x08, 0x3d, 0x98, 0x0b, + 0xa4, 0x30, 0x44, 0x34, 0x18, 0x0d, 0x18, 0x9d, 0x38, 0xa2, 0xa0, 0xae, 0x4d, 0x62, 0x03, 0x3b, + 0xde, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x10, 0x15, 0x39, 0x47, 0xd1, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto new file mode 100644 index 00000000000..aa3df4630a7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; +package plugin; +option go_package = "plugin"; + +message ConnInfo { + uint32 service_id = 1; + string network = 2; + string address = 3; +} + +service GRPCBroker { + rpc StartStream(stream ConnInfo) returns (stream ConnInfo); +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go new file mode 100644 index 00000000000..3e39da95a89 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go @@ -0,0 +1,145 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_controller.proto + +package plugin + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_grpc_controller_08f8296ef6d80436, []int{0} +} +func (m *Empty) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Empty.Unmarshal(m, b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) +} +func (dst *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(dst, src) +} +func (m *Empty) XXX_Size() int { + return xxx_messageInfo_Empty.Size(m) +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Empty)(nil), "plugin.Empty") +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GRPCControllerClient is the client API for GRPCController service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GRPCControllerClient interface { + Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) +} + +type gRPCControllerClient struct { + cc *grpc.ClientConn +} + +func NewGRPCControllerClient(cc *grpc.ClientConn) GRPCControllerClient { + return &gRPCControllerClient{cc} +} + +func (c *gRPCControllerClient) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/plugin.GRPCController/Shutdown", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GRPCControllerServer is the server API for GRPCController service. +type GRPCControllerServer interface { + Shutdown(context.Context, *Empty) (*Empty, error) +} + +func RegisterGRPCControllerServer(s *grpc.Server, srv GRPCControllerServer) { + s.RegisterService(&_GRPCController_serviceDesc, srv) +} + +func _GRPCController_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GRPCControllerServer).Shutdown(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/plugin.GRPCController/Shutdown", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GRPCControllerServer).Shutdown(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +var _GRPCController_serviceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCController", + HandlerType: (*GRPCControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Shutdown", + Handler: _GRPCController_Shutdown_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "grpc_controller.proto", +} + +func init() { + proto.RegisterFile("grpc_controller.proto", fileDescriptor_grpc_controller_08f8296ef6d80436) +} + +var fileDescriptor_grpc_controller_08f8296ef6d80436 = []byte{ + // 108 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0xce, 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xc9, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0x62, 0x2b, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x62, 0xe7, 0x62, 0x75, 0xcd, 0x2d, + 0x28, 0xa9, 0x34, 0xb2, 0xe2, 0xe2, 0x73, 0x0f, 0x0a, 0x70, 0x76, 0x86, 0x2b, 0x14, 0xd2, 0xe0, + 0xe2, 0x08, 0xce, 0x28, 0x2d, 0x49, 0xc9, 0x2f, 0xcf, 0x13, 0xe2, 0xd5, 0x83, 0xa8, 0xd7, 0x03, + 0x2b, 0x96, 0x42, 0xe5, 0x3a, 0x71, 0x44, 0x41, 0x8d, 0x4b, 0x62, 0x03, 0x9b, 0x6e, 0x0c, 0x08, + 0x00, 0x00, 0xff, 0xff, 0xab, 0x7c, 0x27, 0xe5, 0x76, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto new file mode 100644 index 00000000000..345d0a1c1f2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto @@ -0,0 +1,11 @@ +syntax = 
"proto3"; +package plugin; +option go_package = "plugin"; + +message Empty { +} + +// The GRPCController is responsible for telling the plugin server to shutdown. +service GRPCController { + rpc Shutdown(Empty) returns (Empty); +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go new file mode 100644 index 00000000000..c8f94921b46 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go @@ -0,0 +1,233 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_stdio.proto + +package plugin + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import empty "github.com/golang/protobuf/ptypes/empty" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type StdioData_Channel int32 + +const ( + StdioData_INVALID StdioData_Channel = 0 + StdioData_STDOUT StdioData_Channel = 1 + StdioData_STDERR StdioData_Channel = 2 +) + +var StdioData_Channel_name = map[int32]string{ + 0: "INVALID", + 1: "STDOUT", + 2: "STDERR", +} +var StdioData_Channel_value = map[string]int32{ + "INVALID": 0, + "STDOUT": 1, + "STDERR": 2, +} + +func (x StdioData_Channel) String() string { + return proto.EnumName(StdioData_Channel_name, int32(x)) +} +func (StdioData_Channel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_grpc_stdio_db2934322ca63bd5, []int{0, 0} +} + +// StdioData is a single chunk of stdout or stderr data that is streamed +// from GRPCStdio. 
+type StdioData struct { + Channel StdioData_Channel `protobuf:"varint,1,opt,name=channel,proto3,enum=plugin.StdioData_Channel" json:"channel,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StdioData) Reset() { *m = StdioData{} } +func (m *StdioData) String() string { return proto.CompactTextString(m) } +func (*StdioData) ProtoMessage() {} +func (*StdioData) Descriptor() ([]byte, []int) { + return fileDescriptor_grpc_stdio_db2934322ca63bd5, []int{0} +} +func (m *StdioData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StdioData.Unmarshal(m, b) +} +func (m *StdioData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StdioData.Marshal(b, m, deterministic) +} +func (dst *StdioData) XXX_Merge(src proto.Message) { + xxx_messageInfo_StdioData.Merge(dst, src) +} +func (m *StdioData) XXX_Size() int { + return xxx_messageInfo_StdioData.Size(m) +} +func (m *StdioData) XXX_DiscardUnknown() { + xxx_messageInfo_StdioData.DiscardUnknown(m) +} + +var xxx_messageInfo_StdioData proto.InternalMessageInfo + +func (m *StdioData) GetChannel() StdioData_Channel { + if m != nil { + return m.Channel + } + return StdioData_INVALID +} + +func (m *StdioData) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func init() { + proto.RegisterType((*StdioData)(nil), "plugin.StdioData") + proto.RegisterEnum("plugin.StdioData_Channel", StdioData_Channel_name, StdioData_Channel_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GRPCStdioClient is the client API for GRPCStdio service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GRPCStdioClient interface { + // StreamStdio returns a stream that contains all the stdout/stderr. + // This RPC endpoint must only be called ONCE. Once stdio data is consumed + // it is not sent again. + // + // Callers should connect early to prevent blocking on the plugin process. + StreamStdio(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) +} + +type gRPCStdioClient struct { + cc *grpc.ClientConn +} + +func NewGRPCStdioClient(cc *grpc.ClientConn) GRPCStdioClient { + return &gRPCStdioClient{cc} +} + +func (c *gRPCStdioClient) StreamStdio(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) { + stream, err := c.cc.NewStream(ctx, &_GRPCStdio_serviceDesc.Streams[0], "/plugin.GRPCStdio/StreamStdio", opts...) 
+ if err != nil { + return nil, err + } + x := &gRPCStdioStreamStdioClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type GRPCStdio_StreamStdioClient interface { + Recv() (*StdioData, error) + grpc.ClientStream +} + +type gRPCStdioStreamStdioClient struct { + grpc.ClientStream +} + +func (x *gRPCStdioStreamStdioClient) Recv() (*StdioData, error) { + m := new(StdioData) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GRPCStdioServer is the server API for GRPCStdio service. +type GRPCStdioServer interface { + // StreamStdio returns a stream that contains all the stdout/stderr. + // This RPC endpoint must only be called ONCE. Once stdio data is consumed + // it is not sent again. + // + // Callers should connect early to prevent blocking on the plugin process. + StreamStdio(*empty.Empty, GRPCStdio_StreamStdioServer) error +} + +func RegisterGRPCStdioServer(s *grpc.Server, srv GRPCStdioServer) { + s.RegisterService(&_GRPCStdio_serviceDesc, srv) +} + +func _GRPCStdio_StreamStdio_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(empty.Empty) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(GRPCStdioServer).StreamStdio(m, &gRPCStdioStreamStdioServer{stream}) +} + +type GRPCStdio_StreamStdioServer interface { + Send(*StdioData) error + grpc.ServerStream +} + +type gRPCStdioStreamStdioServer struct { + grpc.ServerStream +} + +func (x *gRPCStdioStreamStdioServer) Send(m *StdioData) error { + return x.ServerStream.SendMsg(m) +} + +var _GRPCStdio_serviceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCStdio", + HandlerType: (*GRPCStdioServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamStdio", + Handler: _GRPCStdio_StreamStdio_Handler, + ServerStreams: true, + }, + }, + Metadata: "grpc_stdio.proto", +} + +func init() { proto.RegisterFile("grpc_stdio.proto", fileDescriptor_grpc_stdio_db2934322ca63bd5) } + +var fileDescriptor_grpc_stdio_db2934322ca63bd5 = []byte{ + // 221 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x48, 0x2f, 0x2a, 0x48, + 0x8e, 0x2f, 0x2e, 0x49, 0xc9, 0xcc, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, 0xc8, + 0x29, 0x4d, 0xcf, 0xcc, 0x93, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x07, 0x8b, 0x26, + 0x95, 0xa6, 0xe9, 0xa7, 0xe6, 0x16, 0x94, 0x54, 0x42, 0x14, 0x29, 0xb5, 0x30, 0x72, 0x71, 0x06, + 0x83, 0x34, 0xb9, 0x24, 0x96, 0x24, 0x0a, 0x19, 0x73, 0xb1, 0x27, 0x67, 0x24, 0xe6, 0xe5, 0xa5, + 0xe6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x19, 0x49, 0xea, 0x41, 0x0c, 0xd1, 0x83, 0xab, 0xd1, + 0x73, 0x86, 0x28, 0x08, 0x82, 0xa9, 0x14, 0x12, 0xe2, 0x62, 0x49, 0x49, 0x2c, 0x49, 0x94, 0x60, + 0x52, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0xb3, 0x95, 0xf4, 0xb8, 0xd8, 0xa1, 0xea, 0x84, 0xb8, 0xb9, + 0xd8, 0x3d, 0xfd, 0xc2, 0x1c, 0x7d, 0x3c, 0x5d, 0x04, 0x18, 0x84, 0xb8, 0xb8, 0xd8, 0x82, 0x43, + 0x5c, 0xfc, 0x43, 0x43, 0x04, 0x18, 0xa1, 0x6c, 0xd7, 0xa0, 0x20, 0x01, 0x26, 0x23, 0x77, 0x2e, + 0x4e, 0xf7, 0xa0, 0x00, 0x67, 0xb0, 0x2d, 0x42, 0x56, 0x5c, 0xdc, 0xc1, 0x25, 0x45, 0xa9, 0x89, + 0xb9, 0x10, 0xae, 0x98, 0x1e, 0xc4, 0x03, 0x7a, 0x30, 0x0f, 0xe8, 0xb9, 0x82, 0x3c, 0x20, 0x25, + 0x88, 0xe1, 0x36, 0x03, 0x46, 0x27, 0x8e, 0x28, 0xa8, 0xb7, 0x93, 0xd8, 0xc0, 0xca, 0x8d, 0x01, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x5d, 0xbb, 0xe0, 0x69, 0x19, 
0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto new file mode 100644 index 00000000000..ce1a1223035 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; +package plugin; +option go_package = "plugin"; + +import "google/protobuf/empty.proto"; + +// GRPCStdio is a service that is automatically run by the plugin process +// to stream any stdout/err data so that it can be mirrored on the plugin +// host side. +service GRPCStdio { + // StreamStdio returns a stream that contains all the stdout/stderr. + // This RPC endpoint must only be called ONCE. Once stdio data is consumed + // it is not sent again. + // + // Callers should connect early to prevent blocking on the plugin process. + rpc StreamStdio(google.protobuf.Empty) returns (stream StdioData); +} + +// StdioData is a single chunk of stdout or stderr data that is streamed +// from GRPCStdio. +message StdioData { + enum Channel { + INVALID = 0; + STDOUT = 1; + STDERR = 2; + } + + Channel channel = 1; + bytes data = 2; +} diff --git a/vendor/github.com/hashicorp/go-plugin/log_entry.go b/vendor/github.com/hashicorp/go-plugin/log_entry.go new file mode 100644 index 00000000000..fb2ef930caa --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/log_entry.go @@ -0,0 +1,73 @@ +package plugin + +import ( + "encoding/json" + "time" +) + +// logEntry is the JSON payload that gets sent to Stderr from the plugin to the host +type logEntry struct { + Message string `json:"@message"` + Level string `json:"@level"` + Timestamp time.Time `json:"timestamp"` + KVPairs []*logEntryKV `json:"kv_pairs"` +} + +// logEntryKV is a key value pair within the Output payload +type logEntryKV struct { + Key string `json:"key"` + Value interface{} `json:"value"` +} + +// flattenKVPairs is used to flatten KVPair slice into []interface{} +// for hclog consumption. +func flattenKVPairs(kvs []*logEntryKV) []interface{} { + var result []interface{} + for _, kv := range kvs { + result = append(result, kv.Key) + result = append(result, kv.Value) + } + + return result +} + +// parseJSON handles parsing JSON output +func parseJSON(input []byte) (*logEntry, error) { + var raw map[string]interface{} + entry := &logEntry{} + + err := json.Unmarshal(input, &raw) + if err != nil { + return nil, err + } + + // Parse hclog-specific objects + if v, ok := raw["@message"]; ok { + entry.Message = v.(string) + delete(raw, "@message") + } + + if v, ok := raw["@level"]; ok { + entry.Level = v.(string) + delete(raw, "@level") + } + + if v, ok := raw["@timestamp"]; ok { + t, err := time.Parse("2006-01-02T15:04:05.000000Z07:00", v.(string)) + if err != nil { + return nil, err + } + entry.Timestamp = t + delete(raw, "@timestamp") + } + + // Parse dynamic KV args from the hclog payload. 
+	for k, v := range raw {
+		entry.KVPairs = append(entry.KVPairs, &logEntryKV{
+			Key:   k,
+			Value: v,
+		})
+	}
+
+	return entry, nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/mtls.go b/vendor/github.com/hashicorp/go-plugin/mtls.go
new file mode 100644
index 00000000000..88955245877
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/mtls.go
@@ -0,0 +1,73 @@
+package plugin
+
+import (
+	"bytes"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"math/big"
+	"time"
+)
+
+// generateCert generates a temporary certificate for plugin authentication. The
+// certificate and private key are returned in PEM format.
+func generateCert() (cert []byte, privateKey []byte, err error) {
+	key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
+	sn, err := rand.Int(rand.Reader, serialNumberLimit)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	host := "localhost"
+
+	template := &x509.Certificate{
+		Subject: pkix.Name{
+			CommonName:   host,
+			Organization: []string{"HashiCorp"},
+		},
+		DNSNames: []string{host},
+		ExtKeyUsage: []x509.ExtKeyUsage{
+			x509.ExtKeyUsageClientAuth,
+			x509.ExtKeyUsageServerAuth,
+		},
+		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign,
+		BasicConstraintsValid: true,
+		SerialNumber:          sn,
+		NotBefore:             time.Now().Add(-30 * time.Second),
+		NotAfter:              time.Now().Add(262980 * time.Hour),
+		IsCA:                  true,
+	}
+
+	der, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var certOut bytes.Buffer
+	if err := pem.Encode(&certOut, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil {
+		return nil, nil, err
+	}
+
+	keyBytes, err := x509.MarshalECPrivateKey(key)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var keyOut bytes.Buffer
+	if err := pem.Encode(&keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}); err != nil {
+		return nil, nil, err
+	}
+
+	cert = certOut.Bytes()
+	privateKey = keyOut.Bytes()
+
+	return cert, privateKey, nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/mux_broker.go b/vendor/github.com/hashicorp/go-plugin/mux_broker.go
new file mode 100644
index 00000000000..01c45ad7c68
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/mux_broker.go
@@ -0,0 +1,204 @@
+package plugin
+
+import (
+	"encoding/binary"
+	"fmt"
+	"log"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/hashicorp/yamux"
+)
+
+// MuxBroker is responsible for brokering multiplexed connections by unique ID.
+//
+// It is used by plugins to multiplex multiple RPC connections and data
+// streams on top of a single connection between the plugin process and the
+// host process.
+//
+// This allows a plugin to request a channel with a specific ID to connect to
+// or accept a connection from, and the broker handles the details of
+// holding these channels open while they're being negotiated.
+//
+// The Plugin interface has access to these for both Server and Client.
+// The broker can be used by either (optionally) to reserve and connect to
+// new multiplexed streams. This is useful for complex args and return values,
+// or anything else you might need a data stream for.
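+//
+// As a rough usage sketch (illustrative only; broker stands for a *MuxBroker
+// handed to a Plugin implementation, and error handling is elided):
+//
+//	// One side reserves a stream ID and waits on it.
+//	id := broker.NextId()
+//	go func() {
+//		conn, _ := broker.Accept(id)
+//		// ... use conn as a dedicated data stream ...
+//	}()
+//
+//	// The other side, given the same ID, dials it.
+//	conn, _ := broker.Dial(id)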
+type MuxBroker struct { + nextId uint32 + session *yamux.Session + streams map[uint32]*muxBrokerPending + + sync.Mutex +} + +type muxBrokerPending struct { + ch chan net.Conn + doneCh chan struct{} +} + +func newMuxBroker(s *yamux.Session) *MuxBroker { + return &MuxBroker{ + session: s, + streams: make(map[uint32]*muxBrokerPending), + } +} + +// Accept accepts a connection by ID. +// +// This should not be called multiple times with the same ID at one time. +func (m *MuxBroker) Accept(id uint32) (net.Conn, error) { + var c net.Conn + p := m.getStream(id) + select { + case c = <-p.ch: + close(p.doneCh) + case <-time.After(5 * time.Second): + m.Lock() + defer m.Unlock() + delete(m.streams, id) + + return nil, fmt.Errorf("timeout waiting for accept") + } + + // Ack our connection + if err := binary.Write(c, binary.LittleEndian, id); err != nil { + c.Close() + return nil, err + } + + return c, nil +} + +// AcceptAndServe is used to accept a specific stream ID and immediately +// serve an RPC server on that stream ID. This is used to easily serve +// complex arguments. +// +// The served interface is always registered to the "Plugin" name. +func (m *MuxBroker) AcceptAndServe(id uint32, v interface{}) { + conn, err := m.Accept(id) + if err != nil { + log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) + return + } + + serve(conn, "Plugin", v) +} + +// Close closes the connection and all sub-connections. +func (m *MuxBroker) Close() error { + return m.session.Close() +} + +// Dial opens a connection by ID. +func (m *MuxBroker) Dial(id uint32) (net.Conn, error) { + // Open the stream + stream, err := m.session.OpenStream() + if err != nil { + return nil, err + } + + // Write the stream ID onto the wire. + if err := binary.Write(stream, binary.LittleEndian, id); err != nil { + stream.Close() + return nil, err + } + + // Read the ack that we connected. Then we're off! + var ack uint32 + if err := binary.Read(stream, binary.LittleEndian, &ack); err != nil { + stream.Close() + return nil, err + } + if ack != id { + stream.Close() + return nil, fmt.Errorf("bad ack: %d (expected %d)", ack, id) + } + + return stream, nil +} + +// NextId returns a unique ID to use next. +// +// It is possible for very long-running plugin hosts to wrap this value, +// though it would require a very large amount of RPC calls. In practice +// we've never seen it happen. +func (m *MuxBroker) NextId() uint32 { + return atomic.AddUint32(&m.nextId, 1) +} + +// Run starts the brokering and should be executed in a goroutine, since it +// blocks forever, or until the session closes. +// +// Uses of MuxBroker never need to call this. It is called internally by +// the plugin host/client. 
+func (m *MuxBroker) Run() {
+	for {
+		stream, err := m.session.AcceptStream()
+		if err != nil {
+			// Once we receive an error, just exit
+			break
+		}
+
+		// Read the stream ID from the stream
+		var id uint32
+		if err := binary.Read(stream, binary.LittleEndian, &id); err != nil {
+			stream.Close()
+			continue
+		}
+
+		// Initialize the waiter
+		p := m.getStream(id)
+		select {
+		case p.ch <- stream:
+		default:
+		}
+
+		// Wait for a timeout
+		go m.timeoutWait(id, p)
+	}
+}
+
+func (m *MuxBroker) getStream(id uint32) *muxBrokerPending {
+	m.Lock()
+	defer m.Unlock()
+
+	p, ok := m.streams[id]
+	if ok {
+		return p
+	}
+
+	m.streams[id] = &muxBrokerPending{
+		ch:     make(chan net.Conn, 1),
+		doneCh: make(chan struct{}),
+	}
+	return m.streams[id]
+}
+
+func (m *MuxBroker) timeoutWait(id uint32, p *muxBrokerPending) {
+	// Wait for the stream to either be picked up and connected, or
+	// for a timeout.
+	timeout := false
+	select {
+	case <-p.doneCh:
+	case <-time.After(5 * time.Second):
+		timeout = true
+	}
+
+	m.Lock()
+	defer m.Unlock()
+
+	// Delete the stream so no one else can grab it
+	delete(m.streams, id)
+
+	// If we timed out, then check if we have a channel in the buffer,
+	// and if so, close it. The default case keeps this receive
+	// non-blocking when no stream was ever delivered.
+	if timeout {
+		select {
+		case s := <-p.ch:
+			s.Close()
+		default:
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go
new file mode 100644
index 00000000000..79d9674633a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/plugin.go
@@ -0,0 +1,58 @@
+// The plugin package exposes functions and helpers for communicating with
+// plugins which are implemented as standalone binary applications.
+//
+// plugin.Client fully manages the lifecycle of executing the application,
+// connecting to it, and returning the RPC client for dispensing plugins.
+//
+// plugin.Serve fully manages listeners to expose an RPC server from a binary
+// that plugin.Client can connect to.
+package plugin
+
+import (
+	"context"
+	"errors"
+	"net/rpc"
+
+	"google.golang.org/grpc"
+)
+
+// Plugin is the interface that is implemented to serve/connect to an
+// interface implementation.
+type Plugin interface {
+	// Server should return the RPC server compatible struct to serve
+	// the methods that the Client calls over net/rpc.
+	Server(*MuxBroker) (interface{}, error)
+
+	// Client returns an interface implementation for the plugin you're
+	// serving that communicates to the server end of the plugin.
+	Client(*MuxBroker, *rpc.Client) (interface{}, error)
+}
+
+// GRPCPlugin is the interface that is implemented to serve/connect to
+// a plugin over gRPC.
+type GRPCPlugin interface {
+	// GRPCServer should register this plugin for serving with the
+	// given GRPCServer. Unlike Plugin.Server, this is only called once
+	// since gRPC plugins serve singletons.
+	GRPCServer(*GRPCBroker, *grpc.Server) error
+
+	// GRPCClient should return the interface implementation for the plugin
+	// you're serving via gRPC. The provided context will be canceled by
+	// go-plugin in the event of the plugin process exiting.
+	GRPCClient(context.Context, *GRPCBroker, *grpc.ClientConn) (interface{}, error)
+}
+
+// NetRPCUnsupportedPlugin implements Plugin but returns errors for the
+// Server and Client functions. This will effectively disable support for
+// net/rpc based plugins.
+//
+// This struct can be embedded in your struct.
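+//
+// For example (MyGRPCPlugin is a hypothetical type, not part of this
+// package):
+//
+//	type MyGRPCPlugin struct {
+//		plugin.NetRPCUnsupportedPlugin
+//		// ... GRPCServer/GRPCClient implementations ...
+//	}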
+type NetRPCUnsupportedPlugin struct{} + +func (p NetRPCUnsupportedPlugin) Server(*MuxBroker) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} + +func (p NetRPCUnsupportedPlugin) Client(*MuxBroker, *rpc.Client) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} diff --git a/vendor/github.com/hashicorp/go-plugin/process.go b/vendor/github.com/hashicorp/go-plugin/process.go new file mode 100644 index 00000000000..88c999a580d --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process.go @@ -0,0 +1,24 @@ +package plugin + +import ( + "time" +) + +// pidAlive checks whether a pid is alive. +func pidAlive(pid int) bool { + return _pidAlive(pid) +} + +// pidWait blocks for a process to exit. +func pidWait(pid int) error { + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + for range ticker.C { + if !pidAlive(pid) { + break + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/process_posix.go b/vendor/github.com/hashicorp/go-plugin/process_posix.go new file mode 100644 index 00000000000..185957f8d11 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process_posix.go @@ -0,0 +1,20 @@ +//go:build !windows +// +build !windows + +package plugin + +import ( + "os" + "syscall" +) + +// _pidAlive tests whether a process is alive or not by sending it Signal 0, +// since Go otherwise has no way to test this. +func _pidAlive(pid int) bool { + proc, err := os.FindProcess(pid) + if err == nil { + err = proc.Signal(syscall.Signal(0)) + } + + return err == nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/process_windows.go b/vendor/github.com/hashicorp/go-plugin/process_windows.go new file mode 100644 index 00000000000..0eaa7705d22 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process_windows.go @@ -0,0 +1,30 @@ +package plugin + +import ( + "syscall" +) + +const ( + // Weird name but matches the MSDN docs + exit_STILL_ACTIVE = 259 + + processDesiredAccess = syscall.STANDARD_RIGHTS_READ | + syscall.PROCESS_QUERY_INFORMATION | + syscall.SYNCHRONIZE +) + +// _pidAlive tests whether a process is alive or not +func _pidAlive(pid int) bool { + h, err := syscall.OpenProcess(processDesiredAccess, false, uint32(pid)) + if err != nil { + return false + } + defer syscall.CloseHandle(h) + + var ec uint32 + if e := syscall.GetExitCodeProcess(h, &ec); e != nil { + return false + } + + return ec == exit_STILL_ACTIVE +} diff --git a/vendor/github.com/hashicorp/go-plugin/protocol.go b/vendor/github.com/hashicorp/go-plugin/protocol.go new file mode 100644 index 00000000000..0cfc19e52d6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/protocol.go @@ -0,0 +1,45 @@ +package plugin + +import ( + "io" + "net" +) + +// Protocol is an enum representing the types of protocols. +type Protocol string + +const ( + ProtocolInvalid Protocol = "" + ProtocolNetRPC Protocol = "netrpc" + ProtocolGRPC Protocol = "grpc" +) + +// ServerProtocol is an interface that must be implemented for new plugin +// protocols to be servers. +type ServerProtocol interface { + // Init is called once to configure and initialize the protocol, but + // not start listening. This is the point at which all validation should + // be done and errors returned. + Init() error + + // Config is extra configuration to be outputted to stdout. 
This will + // be automatically base64 encoded to ensure it can be parsed properly. + // This can be an empty string if additional configuration is not needed. + Config() string + + // Serve is called to serve connections on the given listener. This should + // continue until the listener is closed. + Serve(net.Listener) +} + +// ClientProtocol is an interface that must be implemented for new plugin +// protocols to be clients. +type ClientProtocol interface { + io.Closer + + // Dispense dispenses a new instance of the plugin with the given name. + Dispense(string) (interface{}, error) + + // Ping checks that the client connection is still healthy. + Ping() error +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go new file mode 100644 index 00000000000..f30a4b1d387 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/rpc_client.go @@ -0,0 +1,170 @@ +package plugin + +import ( + "crypto/tls" + "fmt" + "io" + "net" + "net/rpc" + + "github.com/hashicorp/yamux" +) + +// RPCClient connects to an RPCServer over net/rpc to dispense plugin types. +type RPCClient struct { + broker *MuxBroker + control *rpc.Client + plugins map[string]Plugin + + // These are the streams used for the various stdout/err overrides + stdout, stderr net.Conn +} + +// newRPCClient creates a new RPCClient. The Client argument is expected +// to be successfully started already with a lock held. +func newRPCClient(c *Client) (*RPCClient, error) { + // Connect to the client + conn, err := net.Dial(c.address.Network(), c.address.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + if c.config.TLSConfig != nil { + conn = tls.Client(conn, c.config.TLSConfig) + } + + // Create the actual RPC client + result, err := NewRPCClient(conn, c.config.Plugins) + if err != nil { + conn.Close() + return nil, err + } + + // Begin the stream syncing so that stdin, out, err work properly + err = result.SyncStreams( + c.config.SyncStdout, + c.config.SyncStderr) + if err != nil { + result.Close() + return nil, err + } + + return result, nil +} + +// NewRPCClient creates a client from an already-open connection-like value. +// Dial is typically used instead. +func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) { + // Create the yamux client so we can multiplex + mux, err := yamux.Client(conn, nil) + if err != nil { + conn.Close() + return nil, err + } + + // Connect to the control stream. + control, err := mux.Open() + if err != nil { + mux.Close() + return nil, err + } + + // Connect stdout, stderr streams + stdstream := make([]net.Conn, 2) + for i, _ := range stdstream { + stdstream[i], err = mux.Open() + if err != nil { + mux.Close() + return nil, err + } + } + + // Create the broker and start it up + broker := newMuxBroker(mux) + go broker.Run() + + // Build the client using our broker and control channel. + return &RPCClient{ + broker: broker, + control: rpc.NewClient(control), + plugins: plugins, + stdout: stdstream[0], + stderr: stdstream[1], + }, nil +} + +// SyncStreams should be called to enable syncing of stdout, +// stderr with the plugin. +// +// This will return immediately and the syncing will continue to happen +// in the background. You do not need to launch this in a goroutine itself. +// +// This should never be called multiple times. 
+func (c *RPCClient) SyncStreams(stdout io.Writer, stderr io.Writer) error {
+	go copyStream("stdout", stdout, c.stdout)
+	go copyStream("stderr", stderr, c.stderr)
+	return nil
+}
+
+// Close closes the connection. The client is no longer usable after this
+// is called.
+func (c *RPCClient) Close() error {
+	// Call the control channel and ask it to gracefully exit. If this
+	// errors, then we save it so that we always return an error but we
+	// want to try to close the other channels anyways.
+	var empty struct{}
+	returnErr := c.control.Call("Control.Quit", true, &empty)
+
+	// Close the other streams we have
+	if err := c.control.Close(); err != nil {
+		return err
+	}
+	if err := c.stdout.Close(); err != nil {
+		return err
+	}
+	if err := c.stderr.Close(); err != nil {
+		return err
+	}
+	if err := c.broker.Close(); err != nil {
+		return err
+	}
+
+	// Return back the error we got from Control.Quit. This is very important
+	// since we MUST return non-nil error if this fails so that Client.Kill
+	// will properly try a process.Kill.
+	return returnErr
+}
+
+func (c *RPCClient) Dispense(name string) (interface{}, error) {
+	p, ok := c.plugins[name]
+	if !ok {
+		return nil, fmt.Errorf("unknown plugin type: %s", name)
+	}
+
+	var id uint32
+	if err := c.control.Call(
+		"Dispenser.Dispense", name, &id); err != nil {
+		return nil, err
+	}
+
+	conn, err := c.broker.Dial(id)
+	if err != nil {
+		return nil, err
+	}
+
+	return p.Client(c.broker, rpc.NewClient(conn))
+}
+
+// Ping pings the connection to ensure it is still alive.
+//
+// The error from the RPC call is returned as-is so that you can inspect
+// it for further error analysis. Any error returned from here would indicate
+// that the connection to the plugin is not healthy.
+func (c *RPCClient) Ping() error {
+	var empty struct{}
+	return c.control.Call("Control.Ping", true, &empty)
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go
new file mode 100644
index 00000000000..064809d2918
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go
@@ -0,0 +1,206 @@
+package plugin
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"net/rpc"
+	"sync"
+
+	"github.com/hashicorp/yamux"
+)
+
+// RPCServer listens for network connections and then dispenses interface
+// implementations over net/rpc.
+//
+// After setting the fields below, they shouldn't be read again directly
+// from the structure, which may be read/written concurrently.
+type RPCServer struct {
+	Plugins map[string]Plugin
+
+	// Stdout, Stderr are what this server will use instead of the
+	// normal stdin/out/err. Because of the multi-process nature
+	// of our plugin system, we can't use the normal process values, so we
+	// make our own custom ones that we pipe across.
+	Stdout io.Reader
+	Stderr io.Reader
+
+	// DoneCh should be set to a non-nil channel that will be closed
+	// when the control requests the RPC server to end.
+	DoneCh chan<- struct{}
+
+	lock sync.Mutex
+}
+
+// ServerProtocol impl.
+func (s *RPCServer) Init() error { return nil }
+
+// ServerProtocol impl.
+func (s *RPCServer) Config() string { return "" }
+
+// ServerProtocol impl.
+func (s *RPCServer) Serve(lis net.Listener) {
+	defer s.done()
+
+	for {
+		conn, err := lis.Accept()
+		if err != nil {
+			severity := "ERR"
+			if errors.Is(err, net.ErrClosed) {
+				severity = "DEBUG"
+			}
+			log.Printf("[%s] plugin: plugin server: %s", severity, err)
+			return
+		}
+
+		go s.ServeConn(conn)
+	}
+}
+
+// ServeConn runs a single connection.
+//
+// ServeConn blocks, serving the connection until the client hangs up.
+func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) {
+	// First create the yamux server to wrap this connection
+	mux, err := yamux.Server(conn, nil)
+	if err != nil {
+		conn.Close()
+		log.Printf("[ERR] plugin: error creating yamux server: %s", err)
+		return
+	}
+
+	// Accept the control connection
+	control, err := mux.Accept()
+	if err != nil {
+		mux.Close()
+		if err != io.EOF {
+			log.Printf("[ERR] plugin: error accepting control connection: %s", err)
+		}
+
+		return
+	}
+
+	// Connect the stdstreams (in, out, err)
+	stdstream := make([]net.Conn, 2)
+	for i := range stdstream {
+		stdstream[i], err = mux.Accept()
+		if err != nil {
+			mux.Close()
+			log.Printf("[ERR] plugin: accepting stream %d: %s", i, err)
+			return
+		}
+	}
+
+	// Copy std streams out to the proper place
+	go copyStream("stdout", stdstream[0], s.Stdout)
+	go copyStream("stderr", stdstream[1], s.Stderr)
+
+	// Create the broker and start it up
+	broker := newMuxBroker(mux)
+	go broker.Run()
+
+	// Use the control connection to build the dispenser and serve the
+	// connection.
+	server := rpc.NewServer()
+	server.RegisterName("Control", &controlServer{
+		server: s,
+	})
+	server.RegisterName("Dispenser", &dispenseServer{
+		broker:  broker,
+		plugins: s.Plugins,
+	})
+	server.ServeConn(control)
+}
+
+// done is called internally by the control server to trigger the
+// doneCh to close which is listened to by the main process to cleanly
+// exit.
+func (s *RPCServer) done() {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	if s.DoneCh != nil {
+		close(s.DoneCh)
+		s.DoneCh = nil
+	}
+}
+
+// controlServer serves the control RPC endpoints (Ping and Quit) for an
+// RPCServer.
+type controlServer struct {
+	server *RPCServer
+}
+
+// Ping can be called to verify that the connection (and likely the plugin
+// binary) is still alive.
+func (c *controlServer) Ping(
+	null bool, response *struct{},
+) error {
+	*response = struct{}{}
+	return nil
+}
+
+func (c *controlServer) Quit(
+	null bool, response *struct{},
+) error {
+	// End the server
+	c.server.done()
+
+	// Always return success
+	*response = struct{}{}
+
+	return nil
+}
+
+// dispenseServer dispenses various interface implementations for Terraform.
+type dispenseServer struct {
+	broker  *MuxBroker
+	plugins map[string]Plugin
+}
+
+func (d *dispenseServer) Dispense(
+	name string, response *uint32,
+) error {
+	// Find the function to create this implementation
+	p, ok := d.plugins[name]
+	if !ok {
+		return fmt.Errorf("unknown plugin type: %s", name)
+	}
+
+	// Create the implementation first so we know if there is an error.
+	impl, err := p.Server(d.broker)
+	if err != nil {
+		// We turn the error into a plain error value so that it works
+		// across RPC.
+		return errors.New(err.Error())
+	}
+
+	// Reserve an ID for our implementation
+	id := d.broker.NextId()
+	*response = id
+
+	// Run the rest in a goroutine since it can only happen once this RPC
+	// call returns. We wait for a connection for the plugin implementation
+	// and serve it.
+ go func() { + conn, err := d.broker.Accept(id) + if err != nil { + log.Printf("[ERR] go-plugin: plugin dispense error: %s: %s", name, err) + return + } + + serve(conn, "Plugin", impl) + }() + + return nil +} + +func serve(conn io.ReadWriteCloser, name string, v interface{}) { + server := rpc.NewServer() + if err := server.RegisterName(name, v); err != nil { + log.Printf("[ERR] go-plugin: plugin dispense error: %s", err) + return + } + + server.ServeConn(conn) +} diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go new file mode 100644 index 00000000000..e134999103f --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -0,0 +1,591 @@ +package plugin + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/signal" + "runtime" + "sort" + "strconv" + "strings" + + hclog "github.com/hashicorp/go-hclog" + "google.golang.org/grpc" +) + +// CoreProtocolVersion is the ProtocolVersion of the plugin system itself. +// We will increment this whenever we change any protocol behavior. This +// will invalidate any prior plugins but will at least allow us to iterate +// on the core in a safe way. We will do our best to do this very +// infrequently. +const CoreProtocolVersion = 1 + +// HandshakeConfig is the configuration used by client and servers to +// handshake before starting a plugin connection. This is embedded by +// both ServeConfig and ClientConfig. +// +// In practice, the plugin host creates a HandshakeConfig that is exported +// and plugins then can easily consume it. +type HandshakeConfig struct { + // ProtocolVersion is the version that clients must match on to + // agree they can communicate. This should match the ProtocolVersion + // set on ClientConfig when using a plugin. + // This field is not required if VersionedPlugins are being used in the + // Client or Server configurations. + ProtocolVersion uint + + // MagicCookieKey and value are used as a very basic verification + // that a plugin is intended to be launched. This is not a security + // measure, just a UX feature. If the magic cookie doesn't match, + // we show human-friendly output. + MagicCookieKey string + MagicCookieValue string +} + +// PluginSet is a set of plugins provided to be registered in the plugin +// server. +type PluginSet map[string]Plugin + +// ServeConfig configures what sorts of plugins are served. +type ServeConfig struct { + // HandshakeConfig is the configuration that must match clients. + HandshakeConfig + + // TLSProvider is a function that returns a configured tls.Config. + TLSProvider func() (*tls.Config, error) + + // Plugins are the plugins that are served. + // The implied version of this PluginSet is the Handshake.ProtocolVersion. + Plugins PluginSet + + // VersionedPlugins is a map of PluginSets for specific protocol versions. + // These can be used to negotiate a compatible version between client and + // server. If this is set, Handshake.ProtocolVersion is not required. + VersionedPlugins map[int]PluginSet + + // GRPCServer should be non-nil to enable serving the plugins over + // gRPC. This is a function to create the server when needed with the + // given server options. The server options populated by go-plugin will + // be for TLS if set. You may modify the input slice. + // + // Note that the grpc.Server will automatically be registered with + // the gRPC health checking service. 
This is not optional since go-plugin
+	// relies on this to implement Ping().
+	GRPCServer func([]grpc.ServerOption) *grpc.Server
+
+	// Logger is used to pass a logger into the server. If none is provided the
+	// server will create a default logger.
+	Logger hclog.Logger
+
+	// Test, if non-nil, will put plugin serving into "test mode". This is
+	// meant to be used as part of `go test` within a plugin's codebase to
+	// launch the plugin in-process and output a ReattachConfig.
+	//
+	// This changes the behavior of the server in a number of ways to
+	// accommodate the expectation of running in-process:
+	//
+	//   * The handshake cookie is not validated.
+	//   * Stdout/stderr will receive plugin reads and writes
+	//   * Connection information will not be sent to stdout
+	//
+	Test *ServeTestConfig
+}
+
+// ServeTestConfig configures plugin serving for test mode. See ServeConfig.Test.
+type ServeTestConfig struct {
+	// Context, if set, will force the plugin serving to end when cancelled.
+	// This is only a test configuration because the non-test configuration
+	// expects to take over the process and therefore end on an interrupt or
+	// kill signal. For tests, we need to kill the plugin serving routinely
+	// and this provides a way to do so.
+	//
+	// If you want to wait for the plugin process to close before moving on,
+	// you can wait on CloseCh.
+	Context context.Context
+
+	// If this channel is non-nil, we will send the ReattachConfig via
+	// this channel. This can be encoded (via JSON recommended) to the
+	// plugin client to attach to this plugin.
+	ReattachConfigCh chan<- *ReattachConfig
+
+	// CloseCh, if non-nil, will be closed when serving exits. This can be
+	// used along with Context to determine when the server is fully shut down.
+	// If this is not set, you can still use Context on its own, but note there
+	// may be a period of time between canceling the context and the plugin
+	// server being shut down.
+	CloseCh chan<- struct{}
+
+	// SyncStdio, if true, will enable the client side "SyncStdout/Stderr"
+	// functionality to work. This defaults to false because the implementation
+	// of making this work within test environments is particularly messy
+	// and SyncStdio functionality is fairly rare, so we default to the simple
+	// scenario.
+	SyncStdio bool
+}
+
+// protocolVersion determines the protocol version and plugin set to be used by
+// the server. In the event that there is no suitable version, the last version
+// in the config is returned leaving the client to report the incompatibility.
+func protocolVersion(opts *ServeConfig) (int, Protocol, PluginSet) {
+	protoVersion := int(opts.ProtocolVersion)
+	pluginSet := opts.Plugins
+	protoType := ProtocolNetRPC
+	// Check if the client sent a list of acceptable versions
+	var clientVersions []int
+	if vs := os.Getenv("PLUGIN_PROTOCOL_VERSIONS"); vs != "" {
+		for _, s := range strings.Split(vs, ",") {
+			v, err := strconv.Atoi(s)
+			if err != nil {
+				fmt.Fprintf(os.Stderr, "client sent invalid plugin version %q", s)
+				continue
+			}
+			clientVersions = append(clientVersions, v)
+		}
+	}
+
+	// We want to iterate in reverse order, to ensure we match the newest
+	// compatible plugin version.
+ sort.Sort(sort.Reverse(sort.IntSlice(clientVersions))) + + // set the old un-versioned fields as if they were versioned plugins + if opts.VersionedPlugins == nil { + opts.VersionedPlugins = make(map[int]PluginSet) + } + + if pluginSet != nil { + opts.VersionedPlugins[protoVersion] = pluginSet + } + + // Sort the version to make sure we match the latest first + var versions []int + for v := range opts.VersionedPlugins { + versions = append(versions, v) + } + + sort.Sort(sort.Reverse(sort.IntSlice(versions))) + + // See if we have multiple versions of Plugins to choose from + for _, version := range versions { + // Record each version, since we guarantee that this returns valid + // values even if they are not a protocol match. + protoVersion = version + pluginSet = opts.VersionedPlugins[version] + + // If we have a configured gRPC server we should select a protocol + if opts.GRPCServer != nil { + // All plugins in a set must use the same transport, so check the first + // for the protocol type + for _, p := range pluginSet { + switch p.(type) { + case GRPCPlugin: + protoType = ProtocolGRPC + default: + protoType = ProtocolNetRPC + } + break + } + } + + for _, clientVersion := range clientVersions { + if clientVersion == protoVersion { + return protoVersion, protoType, pluginSet + } + } + } + + // Return the lowest version as the fallback. + // Since we iterated over all the versions in reverse order above, these + // values are from the lowest version number plugins (which may be from + // a combination of the Handshake.ProtocolVersion and ServeConfig.Plugins + // fields). This allows serving the oldest version of our plugins to a + // legacy client that did not send a PLUGIN_PROTOCOL_VERSIONS list. + return protoVersion, protoType, pluginSet +} + +// Serve serves the plugins given by ServeConfig. +// +// Serve doesn't return until the plugin is done being executed. Any +// fixable errors will be output to os.Stderr and the process will +// exit with a status code of 1. Serve will panic for unexpected +// conditions where a user's fix is unknown. +// +// This is the method that plugins should call in their main() functions. +func Serve(opts *ServeConfig) { + exitCode := -1 + // We use this to trigger an `os.Exit` so that we can execute our other + // deferred functions. In test mode, we just output the err to stderr + // and return. + defer func() { + if opts.Test == nil && exitCode >= 0 { + os.Exit(exitCode) + } + + if opts.Test != nil && opts.Test.CloseCh != nil { + close(opts.Test.CloseCh) + } + }() + + if opts.Test == nil { + // Validate the handshake config + if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" { + fmt.Fprintf(os.Stderr, + "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+ + "key or value was set. Please notify the plugin author and report\n"+ + "this as a bug.\n") + exitCode = 1 + return + } + + // First check the cookie + if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue { + fmt.Fprintf(os.Stderr, + "This binary is a plugin. 
These are not meant to be executed directly.\n"+ + "Please execute the program that consumes these plugins, which will\n"+ + "load any plugins automatically\n") + exitCode = 1 + return + } + } + + // negotiate the version and plugins + // start with default version in the handshake config + protoVersion, protoType, pluginSet := protocolVersion(opts) + + logger := opts.Logger + if logger == nil { + // internal logger to os.Stderr + logger = hclog.New(&hclog.LoggerOptions{ + Level: hclog.Trace, + Output: os.Stderr, + JSONFormat: true, + }) + } + + // Register a listener so we can accept a connection + listener, err := serverListener() + if err != nil { + logger.Error("plugin init error", "error", err) + return + } + + // Close the listener on return. We wrap this in a func() on purpose + // because the "listener" reference may change to TLS. + defer func() { + listener.Close() + }() + + var tlsConfig *tls.Config + if opts.TLSProvider != nil { + tlsConfig, err = opts.TLSProvider() + if err != nil { + logger.Error("plugin tls init", "error", err) + return + } + } + + var serverCert string + clientCert := os.Getenv("PLUGIN_CLIENT_CERT") + // If the client is configured using AutoMTLS, the certificate will be here, + // and we need to generate our own in response. + if tlsConfig == nil && clientCert != "" { + logger.Info("configuring server automatic mTLS") + clientCertPool := x509.NewCertPool() + if !clientCertPool.AppendCertsFromPEM([]byte(clientCert)) { + logger.Error("client cert provided but failed to parse", "cert", clientCert) + } + + certPEM, keyPEM, err := generateCert() + if err != nil { + logger.Error("failed to generate server certificate", "error", err) + panic(err) + } + + cert, err := tls.X509KeyPair(certPEM, keyPEM) + if err != nil { + logger.Error("failed to parse server certificate", "error", err) + panic(err) + } + + tlsConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: clientCertPool, + MinVersion: tls.VersionTLS12, + RootCAs: clientCertPool, + ServerName: "localhost", + } + + // We send back the raw leaf cert data for the client rather than the + // PEM, since the protocol can't handle newlines. + serverCert = base64.RawStdEncoding.EncodeToString(cert.Certificate[0]) + } + + // Create the channel to tell us when we're done + doneCh := make(chan struct{}) + + // Create our new stdout, stderr files. These will override our built-in + // stdout/stderr so that it works across the stream boundary. + var stdout_r, stderr_r io.Reader + stdout_r, stdout_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + stderr_r, stderr_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + + // If we're in test mode, we tee off the reader and write the data + // as-is to our normal Stdout and Stderr so that they continue working + // while stdio works. This is because in test mode, we assume we're running + // in `go test` or some equivalent and we want output to go to standard + // locations. + if opts.Test != nil { + // TODO(mitchellh): This isn't super ideal because a TeeReader + // only works if the reader side is actively read. If we never + // connect via a plugin client, the output still gets swallowed. 
+		stdout_r = io.TeeReader(stdout_r, os.Stdout)
+		stderr_r = io.TeeReader(stderr_r, os.Stderr)
+	}
+
+	// Build the server type
+	var server ServerProtocol
+	switch protoType {
+	case ProtocolNetRPC:
+		// If we have a TLS configuration then we wrap the listener
+		// ourselves and do it at that level.
+		if tlsConfig != nil {
+			listener = tls.NewListener(listener, tlsConfig)
+		}
+
+		// Create the RPC server to dispense
+		server = &RPCServer{
+			Plugins: pluginSet,
+			Stdout:  stdout_r,
+			Stderr:  stderr_r,
+			DoneCh:  doneCh,
+		}
+
+	case ProtocolGRPC:
+		// Create the gRPC server
+		server = &GRPCServer{
+			Plugins: pluginSet,
+			Server:  opts.GRPCServer,
+			TLS:     tlsConfig,
+			Stdout:  stdout_r,
+			Stderr:  stderr_r,
+			DoneCh:  doneCh,
+			logger:  logger,
+		}
+
+	default:
+		panic("unknown server protocol: " + protoType)
+	}
+
+	// Initialize the servers
+	if err := server.Init(); err != nil {
+		logger.Error("protocol init", "error", err)
+		return
+	}
+
+	logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String())
+
+	// Output the address and service name to stdout so that the client can
+	// bring it up. In test mode, we don't do this because clients will
+	// attach via a reattach config.
+	if opts.Test == nil {
+		fmt.Printf("%d|%d|%s|%s|%s|%s\n",
+			CoreProtocolVersion,
+			protoVersion,
+			listener.Addr().Network(),
+			listener.Addr().String(),
+			protoType,
+			serverCert)
+		os.Stdout.Sync()
+	} else if ch := opts.Test.ReattachConfigCh; ch != nil {
+		// Send back the reattach config that can be used. This isn't
+		// quite ready if they connect immediately but the client should
+		// retry a few times.
+		ch <- &ReattachConfig{
+			Protocol:        protoType,
+			ProtocolVersion: protoVersion,
+			Addr:            listener.Addr(),
+			Pid:             os.Getpid(),
+			Test:            true,
+		}
+	}
+
+	// Eat the interrupts. In test mode we disable this so that go test
+	// can be cancelled properly.
+	if opts.Test == nil {
+		ch := make(chan os.Signal, 1)
+		signal.Notify(ch, os.Interrupt)
+		go func() {
+			count := 0
+			for {
+				<-ch
+				count++
+				logger.Trace("plugin received interrupt signal, ignoring", "count", count)
+			}
+		}()
+	}
+
+	// Set our stdout, stderr to the stdio stream that clients can retrieve
+	// using ClientConfig.SyncStdout/err. We only do this for non-test mode
+	// or if the test mode explicitly requests it.
+	//
+	// In test mode, we use a multiwriter so that the data continues going
+	// to the normal stdout/stderr so output can show up in test logs. We
+	// also send to the stdio stream so that clients can continue working
+	// if they depend on that.
+	if opts.Test == nil || opts.Test.SyncStdio {
+		if opts.Test != nil {
+			// In test mode we need to maintain the original values so we can
+			// reset it.
+			defer func(out, err *os.File) {
+				os.Stdout = out
+				os.Stderr = err
+			}(os.Stdout, os.Stderr)
+		}
+		os.Stdout = stdout_w
+		os.Stderr = stderr_w
+	}
+
+	// Accept connections and wait for completion
+	go server.Serve(listener)
+
+	ctx := context.Background()
+	if opts.Test != nil && opts.Test.Context != nil {
+		ctx = opts.Test.Context
+	}
+	select {
+	case <-ctx.Done():
+		// Cancellation. We can stop the server by closing the listener.
+		// This isn't graceful at all, but this is currently only used by
+		// tests and it's our only way to stop.
+		listener.Close()
+
+		// If this is a grpc server, then we also ask the server itself to
+		// end which will kill all connections. There isn't an easy way to do
+		// this for net/rpc currently, but net/rpc is increasingly unused.
+ if s, ok := server.(*GRPCServer); ok { + s.Stop() + } + + // Wait for the server itself to shut down + <-doneCh + + case <-doneCh: + // Note that given the documentation of Serve we should probably be + // setting exitCode = 0 and using os.Exit here. That's how it used to + // work before extracting this library. However, for years we've done + // this so we'll keep this functionality. + } +} + +func serverListener() (net.Listener, error) { + if runtime.GOOS == "windows" { + return serverListener_tcp() + } + + return serverListener_unix() +} + +func serverListener_tcp() (net.Listener, error) { + envMinPort := os.Getenv("PLUGIN_MIN_PORT") + envMaxPort := os.Getenv("PLUGIN_MAX_PORT") + + var minPort, maxPort int64 + var err error + + switch { + case len(envMinPort) == 0: + minPort = 0 + default: + minPort, err = strconv.ParseInt(envMinPort, 10, 32) + if err != nil { + return nil, fmt.Errorf("Couldn't get value from PLUGIN_MIN_PORT: %v", err) + } + } + + switch { + case len(envMaxPort) == 0: + maxPort = 0 + default: + maxPort, err = strconv.ParseInt(envMaxPort, 10, 32) + if err != nil { + return nil, fmt.Errorf("Couldn't get value from PLUGIN_MAX_PORT: %v", err) + } + } + + if minPort > maxPort { + return nil, fmt.Errorf("PLUGIN_MIN_PORT value of %d is greater than PLUGIN_MAX_PORT value of %d", minPort, maxPort) + } + + for port := minPort; port <= maxPort; port++ { + address := fmt.Sprintf("127.0.0.1:%d", port) + listener, err := net.Listen("tcp", address) + if err == nil { + return listener, nil + } + } + + return nil, errors.New("Couldn't bind plugin TCP listener") +} + +func serverListener_unix() (net.Listener, error) { + tf, err := ioutil.TempFile("", "plugin") + if err != nil { + return nil, err + } + path := tf.Name() + + // Close the file and remove it because it has to not exist for + // the domain socket. + if err := tf.Close(); err != nil { + return nil, err + } + if err := os.Remove(path); err != nil { + return nil, err + } + + l, err := net.Listen("unix", path) + if err != nil { + return nil, err + } + + // Wrap the listener in rmListener so that the Unix domain socket file + // is removed on close. + return &rmListener{ + Listener: l, + Path: path, + }, nil +} + +// rmListener is an implementation of net.Listener that forwards most +// calls to the listener but also removes a file as part of the close. We +// use this to cleanup the unix domain socket on close. +type rmListener struct { + net.Listener + Path string +} + +func (l *rmListener) Close() error { + // Close the listener itself + if err := l.Listener.Close(); err != nil { + return err + } + + // Remove the file + return os.Remove(l.Path) +} diff --git a/vendor/github.com/hashicorp/go-plugin/server_mux.go b/vendor/github.com/hashicorp/go-plugin/server_mux.go new file mode 100644 index 00000000000..033079ea0fc --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/server_mux.go @@ -0,0 +1,31 @@ +package plugin + +import ( + "fmt" + "os" +) + +// ServeMuxMap is the type that is used to configure ServeMux +type ServeMuxMap map[string]*ServeConfig + +// ServeMux is like Serve, but serves multiple types of plugins determined +// by the argument given on the command-line. +// +// This command doesn't return until the plugin is done being executed. Any +// errors are logged or output to stderr. +func ServeMux(m ServeMuxMap) { + if len(os.Args) != 2 { + fmt.Fprintf(os.Stderr, + "Invoked improperly. 
This is an internal command that shouldn't\n"+ + "be manually invoked.\n") + os.Exit(1) + } + + opts, ok := m[os.Args[1]] + if !ok { + fmt.Fprintf(os.Stderr, "Unknown plugin: %s\n", os.Args[1]) + os.Exit(1) + } + + Serve(opts) +} diff --git a/vendor/github.com/hashicorp/go-plugin/stream.go b/vendor/github.com/hashicorp/go-plugin/stream.go new file mode 100644 index 00000000000..1d547aaaab3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/stream.go @@ -0,0 +1,18 @@ +package plugin + +import ( + "io" + "log" +) + +func copyStream(name string, dst io.Writer, src io.Reader) { + if src == nil { + panic(name + ": src is nil") + } + if dst == nil { + panic(name + ": dst is nil") + } + if _, err := io.Copy(dst, src); err != nil && err != io.EOF { + log.Printf("[ERR] plugin: stream copy '%s' error: %s", name, err) + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go new file mode 100644 index 00000000000..e36f2eb2b7c --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/testing.go @@ -0,0 +1,180 @@ +package plugin + +import ( + "bytes" + "context" + "io" + "net" + "net/rpc" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/plugin" + "github.com/mitchellh/go-testing-interface" + "google.golang.org/grpc" +) + +// TestOptions allows specifying options that can affect the behavior of the +// test functions +type TestOptions struct { + //ServerStdout causes the given value to be used in place of a blank buffer + //for RPCServer's Stdout + ServerStdout io.ReadCloser + + //ServerStderr causes the given value to be used in place of a blank buffer + //for RPCServer's Stderr + ServerStderr io.ReadCloser +} + +// The testing file contains test helpers that you can use outside of +// this package for making it easier to test plugins themselves. + +// TestConn is a helper function for returning a client and server +// net.Conn connected to each other. +func TestConn(t testing.T) (net.Conn, net.Conn) { + // Listen to any local port. This listener will be closed + // after a single connection is established. + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Start a goroutine to accept our client connection + var serverConn net.Conn + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + defer l.Close() + var err error + serverConn, err = l.Accept() + if err != nil { + t.Fatalf("err: %s", err) + } + }() + + // Connect to the server + clientConn, err := net.Dial("tcp", l.Addr().String()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Wait for the server side to acknowledge it has connected + <-doneCh + + return clientConn, serverConn +} + +// TestRPCConn returns a rpc client and server connected to each other. +func TestRPCConn(t testing.T) (*rpc.Client, *rpc.Server) { + clientConn, serverConn := TestConn(t) + + server := rpc.NewServer() + go server.ServeConn(serverConn) + + client := rpc.NewClient(clientConn) + return client, server +} + +// TestPluginRPCConn returns a plugin RPC client and server that are connected +// together and configured. 
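+//
+// A minimal sketch of how a plugin test might call it (illustrative;
+// myPlugin stands for some Plugin implementation in the test):
+//
+//	client, _ := TestPluginRPCConn(t, map[string]Plugin{
+//		"mine": myPlugin,
+//	}, nil)
+//	raw, err := client.Dispense("mine")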
+func TestPluginRPCConn(t testing.T, ps map[string]Plugin, opts *TestOptions) (*RPCClient, *RPCServer) { + // Create two net.Conns we can use to shuttle our control connection + clientConn, serverConn := TestConn(t) + + // Start up the server + server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)} + if opts != nil { + if opts.ServerStdout != nil { + server.Stdout = opts.ServerStdout + } + if opts.ServerStderr != nil { + server.Stderr = opts.ServerStderr + } + } + go server.ServeConn(serverConn) + + // Connect the client to the server + client, err := NewRPCClient(clientConn, ps) + if err != nil { + t.Fatalf("err: %s", err) + } + + return client, server +} + +// TestGRPCConn returns a gRPC client conn and grpc server that are connected +// together and configured. The register function is used to register services +// prior to the Serve call. This is used to test gRPC connections. +func TestGRPCConn(t testing.T, register func(*grpc.Server)) (*grpc.ClientConn, *grpc.Server) { + // Create a listener + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + server := grpc.NewServer() + register(server) + go server.Serve(l) + + // Connect to the server + conn, err := grpc.Dial( + l.Addr().String(), + grpc.WithBlock(), + grpc.WithInsecure()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Connection successful, close the listener + l.Close() + + return conn, server +} + +// TestPluginGRPCConn returns a plugin gRPC client and server that are connected +// together and configured. This is used to test gRPC connections. +func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCServer) { + // Create a listener + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Start up the server + server := &GRPCServer{ + Plugins: ps, + DoneCh: make(chan struct{}), + Server: DefaultGRPCServer, + Stdout: new(bytes.Buffer), + Stderr: new(bytes.Buffer), + logger: hclog.Default(), + } + if err := server.Init(); err != nil { + t.Fatalf("err: %s", err) + } + go server.Serve(l) + + // Connect to the server + conn, err := grpc.Dial( + l.Addr().String(), + grpc.WithBlock(), + grpc.WithInsecure()) + if err != nil { + t.Fatalf("err: %s", err) + } + + brokerGRPCClient := newGRPCBrokerClient(conn) + broker := newGRPCBroker(brokerGRPCClient, nil) + go broker.Run() + go brokerGRPCClient.StartStream() + + // Create the client + client := &GRPCClient{ + Conn: conn, + Plugins: ps, + broker: broker, + doneCtx: context.Background(), + controller: plugin.NewGRPCControllerClient(conn), + } + + return client, server +} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/.gitignore b/vendor/github.com/hashicorp/go-retryablehttp/.gitignore new file mode 100644 index 00000000000..4e309e0b326 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/.gitignore @@ -0,0 +1,4 @@ +.idea/ +*.iml +*.test +.vscode/ \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE new file mode 100644 index 00000000000..e87a115e462 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. 
"Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. 
Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions
+
+   If you create software not governed by this License, and you want to
+   create a new license for such software, you may create and use a
+   modified version of this License if you rename the license and remove
+   any references to the name of the license steward (except to note that
+   such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+   Licenses If You choose to distribute Source Code Form that is
+   Incompatible With Secondary Licenses under the terms of this version of
+   the License, the notice described in Exhibit B of this License must be
+   attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/Makefile b/vendor/github.com/hashicorp/go-retryablehttp/Makefile
new file mode 100644
index 00000000000..da17640e644
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/Makefile
@@ -0,0 +1,11 @@
+default: test
+
+test:
+	go vet ./...
+	go test -race ./...
+
+updatedeps:
+	go get -f -t -u ./...
+	go get -f -u ./...
+
+.PHONY: default test updatedeps
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/README.md b/vendor/github.com/hashicorp/go-retryablehttp/README.md
new file mode 100644
index 00000000000..09f5eaf2217
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/README.md
@@ -0,0 +1,99 @@
+go-retryablehttp
+================
+
+[![Build Status](http://img.shields.io/travis/hashicorp/go-retryablehttp.svg?style=flat-square)][travis]
+[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
+
+[travis]: http://travis-ci.org/hashicorp/go-retryablehttp
+[godocs]: http://godoc.org/github.com/hashicorp/go-retryablehttp
+
+The `retryablehttp` package provides a familiar HTTP client interface with
+automatic retries and exponential backoff. It is a thin wrapper over the
+standard `net/http` client library and exposes nearly the same public API. This
+makes `retryablehttp` very easy to drop into existing programs.
+
+`retryablehttp` performs automatic retries under certain conditions. Mainly, if
+an error is returned by the client (connection errors, etc.), or if a 500-range
+response code is received (except 501), then a retry is invoked after a wait
+period. Otherwise, the response is returned and left to the caller to
+interpret.
+
+The main difference from `net/http` is that requests which take a request body
+(POST/PUT et al.) can have the body provided in a number of ways (some more or
+less efficient) that allow "rewinding" the request body if the initial request
+fails so that the full request can be attempted again. See the
+[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp) for more
+details.
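+
+For example, a request body supplied as a `[]byte` is wrapped so that a fresh
+reader is created for every attempt, letting the client resend the full payload
+if an earlier try fails. Below is a minimal sketch of such a retryable POST
+(the URL and JSON payload here are placeholders, not part of this library):
+
+```go
+client := retryablehttp.NewClient()
+
+// The []byte body can be replayed on each retry without being consumed.
+body := []byte(`{"name":"example"}`)
+
+resp, err := client.Post("http://example.com/items", "application/json", body)
+if err != nil {
+	panic(err)
+}
+defer resp.Body.Close()
+```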
+
+Version 0.6.0 and before are compatible with Go prior to 1.12. From 0.6.1 onward, Go 1.12+ is required.
+From 0.6.7 onward, Go 1.13+ is required.
+
+Example Use
+===========
+
+Using this library should look almost identical to what you would do with
+`net/http`. The simplest example of a GET request is shown below:
+
+```go
+resp, err := retryablehttp.Get("/foo")
+if err != nil {
+	panic(err)
+}
+```
+
+The returned response object is an `*http.Response`, the same thing you would
+usually get from `net/http`. If the request fails one or more times, the above
+call blocks and retries with exponential backoff.
+
+## Retrying cases that fail after a seeming success
+
+It's possible for a request to succeed in the sense that the expected response headers are received, but then to encounter network-level errors while reading the response body. In go-retryablehttp's most basic usage, this error would not be retryable, due to the out-of-band handling of the response body. In some cases it may be desirable to handle the response body as part of the retryable operation.
+
+A toy example (where a subsequent `c.Do(r)` will retry the full request and succeed on the second attempt) is shown below:
+
+```go
+c := retryablehttp.NewClient()
+r, _ := retryablehttp.NewRequest("GET", "http://example.com", nil)
+handlerShouldRetry := true
+r.SetResponseHandler(func(*http.Response) error {
+	if !handlerShouldRetry {
+		return nil
+	}
+	handlerShouldRetry = false
+	return errors.New("retryable error")
+})
+```
+
+## Getting a stdlib `*http.Client` with retries
+
+It's possible to convert a `*retryablehttp.Client` directly to a `*http.Client`.
+This makes retryablehttp broadly applicable with minimal effort. Simply
+configure a `*retryablehttp.Client` as you wish, and then call `StandardClient()`:
+
+```go
+retryClient := retryablehttp.NewClient()
+retryClient.RetryMax = 10
+
+standardClient := retryClient.StandardClient() // *http.Client
+```
+
+For more usage and examples see the
+[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp).
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go
new file mode 100644
index 00000000000..57116e96072
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go
@@ -0,0 +1,815 @@
+// Package retryablehttp provides a familiar HTTP client interface with
+// automatic retries and exponential backoff. It is a thin wrapper over the
+// standard net/http client library and exposes nearly the same public API.
+// This makes retryablehttp very easy to drop into existing programs.
+//
+// retryablehttp performs automatic retries under certain conditions. Mainly, if
+// an error is returned by the client (connection errors, etc.), or if a 500-range
+// response is received, then a retry is invoked. Otherwise, the response is
+// returned and left to the caller to interpret.
+//
+// Requests which take a request body should provide a non-nil function
+// parameter. The best choice is to provide either a function satisfying
+// ReaderFunc which provides multiple io.Readers in an efficient manner, a
+// *bytes.Buffer (the underlying raw byte slice will be used) or a raw byte
+// slice. As it is a reference type, and we will wrap it as needed by readers,
+// we can efficiently re-use the request body without needing to copy it.
If an +// io.Reader (such as a *bytes.Reader) is provided, the full body will be read +// prior to the first request, and will be efficiently re-used for any retries. +// ReadSeeker can be used, but some users have observed occasional data races +// between the net/http library and the Seek functionality of some +// implementations of ReadSeeker, so should be avoided if possible. +package retryablehttp + +import ( + "bytes" + "context" + "crypto/x509" + "fmt" + "io" + "io/ioutil" + "log" + "math" + "math/rand" + "net/http" + "net/url" + "os" + "regexp" + "strconv" + "strings" + "sync" + "time" + + cleanhttp "github.com/hashicorp/go-cleanhttp" +) + +var ( + // Default retry configuration + defaultRetryWaitMin = 1 * time.Second + defaultRetryWaitMax = 30 * time.Second + defaultRetryMax = 4 + + // defaultLogger is the logger provided with defaultClient + defaultLogger = log.New(os.Stderr, "", log.LstdFlags) + + // defaultClient is used for performing requests without explicitly making + // a new client. It is purposely private to avoid modifications. + defaultClient = NewClient() + + // We need to consume response bodies to maintain http connections, but + // limit the size we consume to respReadLimit. + respReadLimit = int64(4096) + + // A regular expression to match the error returned by net/http when the + // configured number of redirects is exhausted. This error isn't typed + // specifically so we resort to matching on the error string. + redirectsErrorRe = regexp.MustCompile(`stopped after \d+ redirects\z`) + + // A regular expression to match the error returned by net/http when the + // scheme specified in the URL is invalid. This error isn't typed + // specifically so we resort to matching on the error string. + schemeErrorRe = regexp.MustCompile(`unsupported protocol scheme`) + + // A regular expression to match the error returned by net/http when the + // TLS certificate is not trusted. This error isn't typed + // specifically so we resort to matching on the error string. + notTrustedErrorRe = regexp.MustCompile(`certificate is not trusted`) +) + +// ReaderFunc is the type of function that can be given natively to NewRequest +type ReaderFunc func() (io.Reader, error) + +// ResponseHandlerFunc is a type of function that takes in a Response, and does something with it. +// It only runs if the initial part of the request was successful. +// If an error is returned, the client's retry policy will be used to determine whether to retry the whole request. +type ResponseHandlerFunc func(*http.Response) error + +// LenReader is an interface implemented by many in-memory io.Reader's. Used +// for automatically sending the right Content-Length header when possible. +type LenReader interface { + Len() int +} + +// Request wraps the metadata needed to create HTTP requests. +type Request struct { + // body is a seekable reader over the request body payload. This is + // used to rewind the request data in between retries. + body ReaderFunc + + responseHandler ResponseHandlerFunc + + // Embed an HTTP request directly. This makes a *Request act exactly + // like an *http.Request so that all meta methods are supported. + *http.Request +} + +// WithContext returns wrapped Request with a shallow copy of underlying *http.Request +// with its context changed to ctx. The provided ctx must be non-nil. 
+func (r *Request) WithContext(ctx context.Context) *Request { + return &Request{ + body: r.body, + responseHandler: r.responseHandler, + Request: r.Request.WithContext(ctx), + } +} + +// SetResponseHandler allows setting the response handler. +func (r *Request) SetResponseHandler(fn ResponseHandlerFunc) { + r.responseHandler = fn +} + +// BodyBytes allows accessing the request body. It is an analogue to +// http.Request's Body variable, but it returns a copy of the underlying data +// rather than consuming it. +// +// This function is not thread-safe; do not call it at the same time as another +// call, or at the same time this request is being used with Client.Do. +func (r *Request) BodyBytes() ([]byte, error) { + if r.body == nil { + return nil, nil + } + body, err := r.body() + if err != nil { + return nil, err + } + buf := new(bytes.Buffer) + _, err = buf.ReadFrom(body) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// SetBody allows setting the request body. +// +// It is useful if a new body needs to be set without constructing a new Request. +func (r *Request) SetBody(rawBody interface{}) error { + bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody) + if err != nil { + return err + } + r.body = bodyReader + r.ContentLength = contentLength + return nil +} + +// WriteTo allows copying the request body into a writer. +// +// It writes data to w until there's no more data to write or +// when an error occurs. The return int64 value is the number of bytes +// written. Any error encountered during the write is also returned. +// The signature matches io.WriterTo interface. +func (r *Request) WriteTo(w io.Writer) (int64, error) { + body, err := r.body() + if err != nil { + return 0, err + } + if c, ok := body.(io.Closer); ok { + defer c.Close() + } + return io.Copy(w, body) +} + +func getBodyReaderAndContentLength(rawBody interface{}) (ReaderFunc, int64, error) { + var bodyReader ReaderFunc + var contentLength int64 + + switch body := rawBody.(type) { + // If they gave us a function already, great! Use it. + case ReaderFunc: + bodyReader = body + tmp, err := body() + if err != nil { + return nil, 0, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } + + case func() (io.Reader, error): + bodyReader = body + tmp, err := body() + if err != nil { + return nil, 0, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } + + // If a regular byte slice, we can read it over and over via new + // readers + case []byte: + buf := body + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // If a bytes.Buffer we can read the underlying byte slice over and + // over + case *bytes.Buffer: + buf := body + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf.Bytes()), nil + } + contentLength = int64(buf.Len()) + + // We prioritize *bytes.Reader here because we don't really want to + // deal with it seeking so want it to match here instead of the + // io.ReadSeeker case. 
+	case *bytes.Reader:
+		buf, err := ioutil.ReadAll(body)
+		if err != nil {
+			return nil, 0, err
+		}
+		bodyReader = func() (io.Reader, error) {
+			return bytes.NewReader(buf), nil
+		}
+		contentLength = int64(len(buf))
+
+	// Compat case
+	case io.ReadSeeker:
+		raw := body
+		bodyReader = func() (io.Reader, error) {
+			_, err := raw.Seek(0, 0)
+			return ioutil.NopCloser(raw), err
+		}
+		if lr, ok := raw.(LenReader); ok {
+			contentLength = int64(lr.Len())
+		}
+
+	// Read all in so we can reset
+	case io.Reader:
+		buf, err := ioutil.ReadAll(body)
+		if err != nil {
+			return nil, 0, err
+		}
+		bodyReader = func() (io.Reader, error) {
+			return bytes.NewReader(buf), nil
+		}
+		contentLength = int64(len(buf))
+
+	// No body provided, nothing to do
+	case nil:
+
+	// Unrecognized type
+	default:
+		return nil, 0, fmt.Errorf("cannot handle type %T", rawBody)
+	}
+	return bodyReader, contentLength, nil
+}
+
+// FromRequest wraps an http.Request in a retryablehttp.Request
+func FromRequest(r *http.Request) (*Request, error) {
+	bodyReader, _, err := getBodyReaderAndContentLength(r.Body)
+	if err != nil {
+		return nil, err
+	}
+	// Could assert contentLength == r.ContentLength
+	return &Request{body: bodyReader, Request: r}, nil
+}
+
+// NewRequest creates a new wrapped request.
+func NewRequest(method, url string, rawBody interface{}) (*Request, error) {
+	return NewRequestWithContext(context.Background(), method, url, rawBody)
+}
+
+// NewRequestWithContext creates a new wrapped request with the provided context.
+//
+// The context controls the entire lifetime of a request and its response:
+// obtaining a connection, sending the request, and reading the response headers and body.
+func NewRequestWithContext(ctx context.Context, method, url string, rawBody interface{}) (*Request, error) {
+	bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody)
+	if err != nil {
+		return nil, err
+	}
+
+	httpReq, err := http.NewRequestWithContext(ctx, method, url, nil)
+	if err != nil {
+		return nil, err
+	}
+	httpReq.ContentLength = contentLength
+
+	return &Request{body: bodyReader, Request: httpReq}, nil
+}
+
+// Logger is an interface allowing the use of loggers other than the
+// standard log.Logger.
+type Logger interface {
+	Printf(string, ...interface{})
+}
+
+// LeveledLogger is an interface that can be implemented by any logger or a
+// logger wrapper to provide leveled logging. The methods accept a message
+// string and a variadic number of key-value pairs. For log.Printf style
+// formatting where the message string contains a format specifier, use the Logger
+// interface.
+type LeveledLogger interface {
+	Error(msg string, keysAndValues ...interface{})
+	Info(msg string, keysAndValues ...interface{})
+	Debug(msg string, keysAndValues ...interface{})
+	Warn(msg string, keysAndValues ...interface{})
+}
+
+// hookLogger adapts a LeveledLogger to Logger for use by the existing hook functions
+// without changing the API.
+type hookLogger struct {
+	LeveledLogger
+}
+
+func (h hookLogger) Printf(s string, args ...interface{}) {
+	h.Info(fmt.Sprintf(s, args...))
+}
+
+// RequestLogHook allows a function to run before each retry. The HTTP
+// request which will be made, and the retry number (0 for the initial
+// request) are available to users. The internal logger is exposed to
+// consumers.
+type RequestLogHook func(Logger, *http.Request, int)
+
+// ResponseLogHook is like RequestLogHook, but allows running a function
+// on each HTTP response. This function will be invoked at the end of
+// every HTTP request executed, regardless of whether a subsequent retry
+// needs to be performed or not. If the response body is read or closed
+// from this method, this will affect the response returned from Do().
+type ResponseLogHook func(Logger, *http.Response)
+
+// CheckRetry specifies a policy for handling retries. It is called
+// following each request with the response and error values returned by
+// the http.Client. If CheckRetry returns false, the Client stops retrying
+// and returns the response to the caller. If CheckRetry returns an error,
+// that error value is returned in lieu of the error from the request. The
+// Client will close any response body when retrying, but if the retry is
+// aborted it is up to the CheckRetry callback to properly close any
+// response body before returning.
+type CheckRetry func(ctx context.Context, resp *http.Response, err error) (bool, error)
+
+// Backoff specifies a policy for how long to wait between retries.
+// It is called after a failing request to determine the amount of time
+// that should pass before trying again.
+type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration
+
+// ErrorHandler is called if retries are exhausted, containing the last status
+// from the http library. If not specified, the default behavior for the library is
+// to close the body and return an error indicating how many tries were
+// attempted. If overriding this, be sure to close the body if needed.
+type ErrorHandler func(resp *http.Response, err error, numTries int) (*http.Response, error)
+
+// Client is used to make HTTP requests. It adds additional functionality
+// like automatic retries to tolerate minor outages.
+type Client struct {
+	HTTPClient *http.Client // Internal HTTP client.
+	Logger     interface{}  // Custom logger instance. Can be either Logger or LeveledLogger
+
+	RetryWaitMin time.Duration // Minimum time to wait
+	RetryWaitMax time.Duration // Maximum time to wait
+	RetryMax     int           // Maximum number of retries
+
+	// RequestLogHook allows a user-supplied function to be called
+	// before each retry.
+	RequestLogHook RequestLogHook
+
+	// ResponseLogHook allows a user-supplied function to be called
+	// with the response from each HTTP request executed.
+	ResponseLogHook ResponseLogHook
+
+	// CheckRetry specifies the policy for handling retries, and is called
+	// after each request. The default policy is DefaultRetryPolicy.
+	CheckRetry CheckRetry
+
+	// Backoff specifies the policy for how long to wait between retries
+	Backoff Backoff
+
+	// ErrorHandler specifies the custom error handler to use, if any
+	ErrorHandler ErrorHandler
+
+	loggerInit sync.Once
+	clientInit sync.Once
+}
+
+// NewClient creates a new Client with default settings.
+func NewClient() *Client {
+	return &Client{
+		HTTPClient:   cleanhttp.DefaultPooledClient(),
+		Logger:       defaultLogger,
+		RetryWaitMin: defaultRetryWaitMin,
+		RetryWaitMax: defaultRetryWaitMax,
+		RetryMax:     defaultRetryMax,
+		CheckRetry:   DefaultRetryPolicy,
+		Backoff:      DefaultBackoff,
+	}
+}
+
+func (c *Client) logger() interface{} {
+	c.loggerInit.Do(func() {
+		if c.Logger == nil {
+			return
+		}
+
+		switch c.Logger.(type) {
+		case Logger, LeveledLogger:
+			// ok
+		default:
+			// This panic should only surface in development, while the Logger is being set up; never in production.
+			panic(fmt.Sprintf("invalid logger type passed, must be Logger or LeveledLogger, was %T", c.Logger))
+		}
+	})
+
+	return c.Logger
+}
+
+// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which
+// will retry on connection errors and server errors.
+func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) {
+	// do not retry on context.Canceled or context.DeadlineExceeded
+	if ctx.Err() != nil {
+		return false, ctx.Err()
+	}
+
+	// don't propagate other errors
+	shouldRetry, _ := baseRetryPolicy(resp, err)
+	return shouldRetry, nil
+}
+
+// ErrorPropagatedRetryPolicy is the same as DefaultRetryPolicy, except it
+// propagates errors back instead of returning nil. This allows you to inspect
+// why it decided to retry or not.
+func ErrorPropagatedRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) {
+	// do not retry on context.Canceled or context.DeadlineExceeded
+	if ctx.Err() != nil {
+		return false, ctx.Err()
+	}
+
+	return baseRetryPolicy(resp, err)
+}
+
+func baseRetryPolicy(resp *http.Response, err error) (bool, error) {
+	if err != nil {
+		if v, ok := err.(*url.Error); ok {
+			// Don't retry if the error was due to too many redirects.
+			if redirectsErrorRe.MatchString(v.Error()) {
+				return false, v
+			}
+
+			// Don't retry if the error was due to an invalid protocol scheme.
+			if schemeErrorRe.MatchString(v.Error()) {
+				return false, v
+			}
+
+			// Don't retry if the error was due to TLS cert verification failure.
+			if notTrustedErrorRe.MatchString(v.Error()) {
+				return false, v
+			}
+			if _, ok := v.Err.(x509.UnknownAuthorityError); ok {
+				return false, v
+			}
+		}
+
+		// The error is likely recoverable so retry.
+		return true, nil
+	}
+
+	// 429 Too Many Requests is recoverable. Sometimes the server sends
+	// a Retry-After response header to indicate when it will be
+	// available to start processing requests from the client.
+	if resp.StatusCode == http.StatusTooManyRequests {
+		return true, nil
+	}
+
+	// Check the response code. We retry on 500-range responses to allow
+	// the server time to recover, as 500's are typically not permanent
+	// errors and may relate to outages on the server side. This will catch
+	// invalid response codes as well, like 0 and 999.
+	if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != http.StatusNotImplemented) {
+		return true, fmt.Errorf("unexpected HTTP status %s", resp.Status)
+	}
+
+	return false, nil
+}
+
+// DefaultBackoff provides a default callback for Client.Backoff which
+// will perform exponential backoff based on the attempt number and limited
+// by the provided minimum and maximum durations.
+//
+// It also tries to parse the Retry-After response header when an http.StatusTooManyRequests
+// (HTTP code 429) is found in the resp parameter. In that case it returns the number of
+// seconds the server states it needs before it is ready to process more requests from this client.
+func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { + if resp != nil { + if resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode == http.StatusServiceUnavailable { + if s, ok := resp.Header["Retry-After"]; ok { + if sleep, err := strconv.ParseInt(s[0], 10, 64); err == nil { + return time.Second * time.Duration(sleep) + } + } + } + } + + mult := math.Pow(2, float64(attemptNum)) * float64(min) + sleep := time.Duration(mult) + if float64(sleep) != mult || sleep > max { + sleep = max + } + return sleep +} + +// LinearJitterBackoff provides a callback for Client.Backoff which will +// perform linear backoff based on the attempt number and with jitter to +// prevent a thundering herd. +// +// min and max here are *not* absolute values. The number to be multiplied by +// the attempt number will be chosen at random from between them, thus they are +// bounding the jitter. +// +// For instance: +// * To get strictly linear backoff of one second increasing each retry, set +// both to one second (1s, 2s, 3s, 4s, ...) +// * To get a small amount of jitter centered around one second increasing each +// retry, set to around one second, such as a min of 800ms and max of 1200ms +// (892ms, 2102ms, 2945ms, 4312ms, ...) +// * To get extreme jitter, set to a very wide spread, such as a min of 100ms +// and a max of 20s (15382ms, 292ms, 51321ms, 35234ms, ...) +func LinearJitterBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { + // attemptNum always starts at zero but we want to start at 1 for multiplication + attemptNum++ + + if max <= min { + // Unclear what to do here, or they are the same, so return min * + // attemptNum + return min * time.Duration(attemptNum) + } + + // Seed rand; doing this every time is fine + rand := rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) + + // Pick a random number that lies somewhere between the min and max and + // multiply by the attemptNum. attemptNum starts at zero so we always + // increment here. We first get a random percentage, then apply that to the + // difference between min and max, and add to min. + jitter := rand.Float64() * float64(max-min) + jitterMin := int64(jitter) + int64(min) + return time.Duration(jitterMin * int64(attemptNum)) +} + +// PassthroughErrorHandler is an ErrorHandler that directly passes through the +// values from the net/http library for the final request. The body is not +// closed. +func PassthroughErrorHandler(resp *http.Response, err error, _ int) (*http.Response, error) { + return resp, err +} + +// Do wraps calling an HTTP method with retries. +func (c *Client) Do(req *Request) (*http.Response, error) { + c.clientInit.Do(func() { + if c.HTTPClient == nil { + c.HTTPClient = cleanhttp.DefaultPooledClient() + } + }) + + logger := c.logger() + + if logger != nil { + switch v := logger.(type) { + case LeveledLogger: + v.Debug("performing request", "method", req.Method, "url", req.URL) + case Logger: + v.Printf("[DEBUG] %s %s", req.Method, req.URL) + } + } + + var resp *http.Response + var attempt int + var shouldRetry bool + var doErr, respErr, checkErr error + + for i := 0; ; i++ { + doErr, respErr = nil, nil + attempt++ + + // Always rewind the request body when non-nil. 
+ if req.body != nil { + body, err := req.body() + if err != nil { + c.HTTPClient.CloseIdleConnections() + return resp, err + } + if c, ok := body.(io.ReadCloser); ok { + req.Body = c + } else { + req.Body = ioutil.NopCloser(body) + } + } + + if c.RequestLogHook != nil { + switch v := logger.(type) { + case LeveledLogger: + c.RequestLogHook(hookLogger{v}, req.Request, i) + case Logger: + c.RequestLogHook(v, req.Request, i) + default: + c.RequestLogHook(nil, req.Request, i) + } + } + + // Attempt the request + resp, doErr = c.HTTPClient.Do(req.Request) + + // Check if we should continue with retries. + shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, doErr) + if !shouldRetry && doErr == nil && req.responseHandler != nil { + respErr = req.responseHandler(resp) + shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, respErr) + } + + err := doErr + if respErr != nil { + err = respErr + } + if err != nil { + switch v := logger.(type) { + case LeveledLogger: + v.Error("request failed", "error", err, "method", req.Method, "url", req.URL) + case Logger: + v.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err) + } + } else { + // Call this here to maintain the behavior of logging all requests, + // even if CheckRetry signals to stop. + if c.ResponseLogHook != nil { + // Call the response logger function if provided. + switch v := logger.(type) { + case LeveledLogger: + c.ResponseLogHook(hookLogger{v}, resp) + case Logger: + c.ResponseLogHook(v, resp) + default: + c.ResponseLogHook(nil, resp) + } + } + } + + if !shouldRetry { + break + } + + // We do this before drainBody because there's no need for the I/O if + // we're breaking out + remain := c.RetryMax - i + if remain <= 0 { + break + } + + // We're going to retry, consume any response to reuse the connection. + if doErr == nil { + c.drainBody(resp.Body) + } + + wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp) + if logger != nil { + desc := fmt.Sprintf("%s %s", req.Method, req.URL) + if resp != nil { + desc = fmt.Sprintf("%s (status: %d)", desc, resp.StatusCode) + } + switch v := logger.(type) { + case LeveledLogger: + v.Debug("retrying request", "request", desc, "timeout", wait, "remaining", remain) + case Logger: + v.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) + } + } + timer := time.NewTimer(wait) + select { + case <-req.Context().Done(): + timer.Stop() + c.HTTPClient.CloseIdleConnections() + return nil, req.Context().Err() + case <-timer.C: + } + + // Make shallow copy of http Request so that we can modify its body + // without racing against the closeBody call in persistConn.writeLoop. 
+ httpreq := *req.Request + req.Request = &httpreq + } + + // this is the closest we have to success criteria + if doErr == nil && respErr == nil && checkErr == nil && !shouldRetry { + return resp, nil + } + + defer c.HTTPClient.CloseIdleConnections() + + var err error + if checkErr != nil { + err = checkErr + } else if respErr != nil { + err = respErr + } else { + err = doErr + } + + if c.ErrorHandler != nil { + return c.ErrorHandler(resp, err, attempt) + } + + // By default, we close the response body and return an error without + // returning the response + if resp != nil { + c.drainBody(resp.Body) + } + + // this means CheckRetry thought the request was a failure, but didn't + // communicate why + if err == nil { + return nil, fmt.Errorf("%s %s giving up after %d attempt(s)", + req.Method, req.URL, attempt) + } + + return nil, fmt.Errorf("%s %s giving up after %d attempt(s): %w", + req.Method, req.URL, attempt, err) +} + +// Try to read the response body so we can reuse this connection. +func (c *Client) drainBody(body io.ReadCloser) { + defer body.Close() + _, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit)) + if err != nil { + if c.logger() != nil { + switch v := c.logger().(type) { + case LeveledLogger: + v.Error("error reading response body", "error", err) + case Logger: + v.Printf("[ERR] error reading response body: %v", err) + } + } + } +} + +// Get is a shortcut for doing a GET request without making a new client. +func Get(url string) (*http.Response, error) { + return defaultClient.Get(url) +} + +// Get is a convenience helper for doing simple GET requests. +func (c *Client) Get(url string) (*http.Response, error) { + req, err := NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return c.Do(req) +} + +// Head is a shortcut for doing a HEAD request without making a new client. +func Head(url string) (*http.Response, error) { + return defaultClient.Head(url) +} + +// Head is a convenience method for doing simple HEAD requests. +func (c *Client) Head(url string) (*http.Response, error) { + req, err := NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return c.Do(req) +} + +// Post is a shortcut for doing a POST request without making a new client. +func Post(url, bodyType string, body interface{}) (*http.Response, error) { + return defaultClient.Post(url, bodyType, body) +} + +// Post is a convenience method for doing simple POST requests. +func (c *Client) Post(url, bodyType string, body interface{}) (*http.Response, error) { + req, err := NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return c.Do(req) +} + +// PostForm is a shortcut to perform a POST with form data without creating +// a new client. +func PostForm(url string, data url.Values) (*http.Response, error) { + return defaultClient.PostForm(url, data) +} + +// PostForm is a convenience method for doing simple POST operations using +// pre-filled url.Values form data. +func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) { + return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// StandardClient returns a stdlib *http.Client with a custom Transport, which +// shims in a *retryablehttp.Client for added retries. 
+func (c *Client) StandardClient() *http.Client { + return &http.Client{ + Transport: &RoundTripper{Client: c}, + } +} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go b/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go new file mode 100644 index 00000000000..8f3ee358427 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go @@ -0,0 +1,52 @@ +package retryablehttp + +import ( + "errors" + "net/http" + "net/url" + "sync" +) + +// RoundTripper implements the http.RoundTripper interface, using a retrying +// HTTP client to execute requests. +// +// It is important to note that retryablehttp doesn't always act exactly as a +// RoundTripper should. This is highly dependent on the retryable client's +// configuration. +type RoundTripper struct { + // The client to use during requests. If nil, the default retryablehttp + // client and settings will be used. + Client *Client + + // once ensures that the logic to initialize the default client runs at + // most once, in a single thread. + once sync.Once +} + +// init initializes the underlying retryable client. +func (rt *RoundTripper) init() { + if rt.Client == nil { + rt.Client = NewClient() + } +} + +// RoundTrip satisfies the http.RoundTripper interface. +func (rt *RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + rt.once.Do(rt.init) + + // Convert the request to be retryable. + retryableReq, err := FromRequest(req) + if err != nil { + return nil, err + } + + // Execute the request. + resp, err := rt.Client.Do(retryableReq) + // If we got an error returned by standard library's `Do` method, unwrap it + // otherwise we will wind up erroneously re-nesting the error. + if _, ok := err.(*url.Error); ok { + return resp, errors.Unwrap(err) + } + + return resp, err +} diff --git a/vendor/github.com/hashicorp/go-rootcerts/.travis.yml b/vendor/github.com/hashicorp/go-rootcerts/.travis.yml new file mode 100644 index 00000000000..80e1de44e96 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/.travis.yml @@ -0,0 +1,12 @@ +sudo: false + +language: go + +go: + - 1.6 + +branches: + only: + - master + +script: make test diff --git a/vendor/github.com/hashicorp/go-rootcerts/LICENSE b/vendor/github.com/hashicorp/go-rootcerts/LICENSE new file mode 100644 index 00000000000..e87a115e462 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. 
"Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. 
under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. 
Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. 
This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. 
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-rootcerts/Makefile b/vendor/github.com/hashicorp/go-rootcerts/Makefile
new file mode 100644
index 00000000000..c3989e789f6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/Makefile
@@ -0,0 +1,8 @@
+TEST?=./...
+
+test:
+	go test $(TEST) $(TESTARGS) -timeout=3s -parallel=4
+	go vet $(TEST)
+	go test $(TEST) -race
+
+.PHONY: test
diff --git a/vendor/github.com/hashicorp/go-rootcerts/README.md b/vendor/github.com/hashicorp/go-rootcerts/README.md
new file mode 100644
index 00000000000..6a128e1e14a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/README.md
@@ -0,0 +1,44 @@
+# rootcerts
+
+Functions for loading root certificates for TLS connections.
+
+-----
+
+Go's standard library `crypto/tls` provides a common mechanism for configuring
+TLS connections in `tls.Config`. The `RootCAs` field on this struct is a pool
+of certificates for the client to use as a trust store when verifying server
+certificates.
+
+This library contains utility functions for loading certificates destined for
+that field, as well as one other important thing:
+
+When the `RootCAs` field is `nil`, the standard library attempts to load the
+host's root CA set. This behavior is OS-specific, and the Darwin
+implementation contains [a bug that prevents trusted certificates from the
+System and Login keychains from being loaded][1]. This library contains
+Darwin-specific behavior that works around that bug.
+
+[1]: https://github.com/golang/go/issues/14514
+
+## Example Usage
+
+Here's a snippet demonstrating how this library is meant to be used:
+
+```go
+func httpClient() (*http.Client, error) {
+	tlsConfig := &tls.Config{}
+	err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{
+		CAFile:        os.Getenv("MYAPP_CAFILE"),
+		CAPath:        os.Getenv("MYAPP_CAPATH"),
+		CACertificate: []byte(os.Getenv("MYAPP_CERTIFICATE")),
+	})
+	if err != nil {
+		return nil, err
+	}
+	c := cleanhttp.DefaultClient()
+	t := cleanhttp.DefaultTransport()
+	t.TLSClientConfig = tlsConfig
+	c.Transport = t
+	return c, nil
+}
+```
diff --git a/vendor/github.com/hashicorp/go-rootcerts/doc.go b/vendor/github.com/hashicorp/go-rootcerts/doc.go
new file mode 100644
index 00000000000..b55cc628485
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/doc.go
@@ -0,0 +1,9 @@
+// Package rootcerts contains functions to aid in loading CA certificates for
+// TLS connections.
+//
+// In addition, its default behavior on Darwin works around an open issue [1]
+// in Go's crypto/x509 that prevents certificates from being loaded from the
+// System or Login keychains.
+//
+// [1] https://github.com/golang/go/issues/14514
+package rootcerts
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
new file mode 100644
index 00000000000..69aabd6bc74
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
@@ -0,0 +1,123 @@
+package rootcerts
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+)
+
+// Config determines where LoadCACerts will load certificates from. When CAFile,
+// CACertificate and CAPath are blank, this library's functions will either load
+// system roots explicitly and return them, or set the CertPool to nil to allow
+// Go's standard library to load system certs.
+type Config struct {
+	// CAFile is a path to a PEM-encoded certificate file or bundle. Takes
+	// precedence over CACertificate and CAPath.
+	CAFile string
+
+	// CACertificate is a PEM-encoded certificate or bundle. Takes precedence
+	// over CAPath.
+	CACertificate []byte
+
+	// CAPath is a path to a directory populated with PEM-encoded certificates.
+	CAPath string
+}
+
+// ConfigureTLS sets up the RootCAs on the provided tls.Config based on the
+// Config specified.
+func ConfigureTLS(t *tls.Config, c *Config) error {
+	if t == nil {
+		return nil
+	}
+	pool, err := LoadCACerts(c)
+	if err != nil {
+		return err
+	}
+	t.RootCAs = pool
+	return nil
+}
+
+// LoadCACerts loads a CertPool based on the Config specified.
+func LoadCACerts(c *Config) (*x509.CertPool, error) {
+	if c == nil {
+		c = &Config{}
+	}
+	if c.CAFile != "" {
+		return LoadCAFile(c.CAFile)
+	}
+	if len(c.CACertificate) != 0 {
+		return AppendCertificate(c.CACertificate)
+	}
+	if c.CAPath != "" {
+		return LoadCAPath(c.CAPath)
+	}
+
+	return LoadSystemCAs()
+}
+
+// LoadCAFile loads a single PEM-encoded file from the path specified.
+func LoadCAFile(caFile string) (*x509.CertPool, error) {
+	pool := x509.NewCertPool()
+
+	pem, err := ioutil.ReadFile(caFile)
+	if err != nil {
+		return nil, fmt.Errorf("Error loading CA File: %s", err)
+	}
+
+	ok := pool.AppendCertsFromPEM(pem)
+	if !ok {
+		return nil, fmt.Errorf("Error loading CA File: Couldn't parse PEM in: %s", caFile)
+	}
+
+	return pool, nil
+}
+
+// AppendCertificate appends an in-memory PEM-encoded certificate or bundle and returns a pool.
+func AppendCertificate(ca []byte) (*x509.CertPool, error) {
+	pool := x509.NewCertPool()
+
+	ok := pool.AppendCertsFromPEM(ca)
+	if !ok {
+		return nil, errors.New("Error appending CA: Couldn't parse PEM")
+	}
+
+	return pool, nil
+}
+
+// LoadCAPath walks the provided path and loads all certificates encountered into
+// a pool.
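+// Every regular file encountered under caPath must contain parseable PEM
+// data, or an error is returned; the walk descends into subdirectories.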
+func LoadCAPath(caPath string) (*x509.CertPool, error) {
+	pool := x509.NewCertPool()
+	walkFn := func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if info.IsDir() {
+			return nil
+		}
+
+		pem, err := ioutil.ReadFile(path)
+		if err != nil {
+			return fmt.Errorf("Error loading file from CAPath: %s", err)
+		}
+
+		ok := pool.AppendCertsFromPEM(pem)
+		if !ok {
+			return fmt.Errorf("Error loading CA Path: Couldn't parse PEM in: %s", path)
+		}
+
+		return nil
+	}
+
+	err := filepath.Walk(caPath, walkFn)
+	if err != nil {
+		return nil, err
+	}
+
+	return pool, nil
+}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
new file mode 100644
index 00000000000..66b1472c4a0
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
@@ -0,0 +1,12 @@
+// +build !darwin
+
+package rootcerts
+
+import "crypto/x509"
+
+// LoadSystemCAs does nothing on non-Darwin systems. We return nil so that
+// default behavior of standard TLS config libraries is triggered, which is to
+// load system certs.
+func LoadSystemCAs() (*x509.CertPool, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go
new file mode 100644
index 00000000000..a9a040657fe
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go
@@ -0,0 +1,48 @@
+package rootcerts
+
+import (
+	"crypto/x509"
+	"os/exec"
+	"path"
+
+	"github.com/mitchellh/go-homedir"
+)
+
+// LoadSystemCAs has special behavior on Darwin systems to work around the keychain-loading issue in Go's crypto/x509: it gathers root certificates by shelling out to the `security` tool for each known keychain.
+func LoadSystemCAs() (*x509.CertPool, error) {
+	pool := x509.NewCertPool()
+
+	for _, keychain := range certKeychains() {
+		err := addCertsFromKeychain(pool, keychain)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return pool, nil
+}
+
+func addCertsFromKeychain(pool *x509.CertPool, keychain string) error {
+	cmd := exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", keychain)
+	data, err := cmd.Output()
+	if err != nil {
+		return err
+	}
+
+	pool.AppendCertsFromPEM(data)
+
+	return nil
+}
+
+func certKeychains() []string {
+	keychains := []string{
+		"/System/Library/Keychains/SystemRootCertificates.keychain",
+		"/Library/Keychains/System.keychain",
+	}
+	home, err := homedir.Dir()
+	if err == nil {
+		loginKeychain := path.Join(home, "Library", "Keychains", "login.keychain")
+		keychains = append(keychains, loginKeychain)
+	}
+	return keychains
+}
diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/mlock/LICENSE b/vendor/github.com/hashicorp/go-secure-stdlib/mlock/LICENSE
new file mode 100644
index 00000000000..e87a115e462
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-secure-stdlib/mlock/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+     means Covered Software of a particular Contributor.
+
+1.4. 
"Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock.go b/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock.go new file mode 100644 index 00000000000..1675633d34b --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock.go @@ -0,0 +1,15 @@ +package mlock + +// This should be set by the OS-specific packages to tell whether LockMemory +// is supported or not. +var supported bool + +// Supported returns true if LockMemory is functional on this system. +func Supported() bool { + return supported +} + +// LockMemory prevents any memory from being swapped to disk. +func LockMemory() error { + return lockMemory() +} diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unavail.go b/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unavail.go new file mode 100644 index 00000000000..941eb2d7bdd --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unavail.go @@ -0,0 +1,13 @@ +// +build darwin nacl netbsd plan9 windows + +package mlock + +func init() { + supported = false +} + +func lockMemory() error { + // XXX: No good way to do this on Windows. There is the VirtualLock + // method, but it requires a specific address and offset. + return nil +} diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unix.go b/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unix.go new file mode 100644 index 00000000000..af0a69d48a3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unix.go @@ -0,0 +1,18 @@ +// +build dragonfly freebsd linux openbsd solaris + +package mlock + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +func init() { + supported = true +} + +func lockMemory() error { + // Mlockall prevents all current and future pages from being swapped out. 
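+	// MCL_CURRENT locks all pages currently mapped into the process's
+	// address space; MCL_FUTURE also locks pages mapped in the future
+	// (see mlockall(2)).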
+ return unix.Mlockall(syscall.MCL_CURRENT | syscall.MCL_FUTURE) +} diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/LICENSE b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/LICENSE new file mode 100644 index 00000000000..e87a115e462 --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. 
Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. 
Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. 
Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parsepath.go b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parsepath.go new file mode 100644 index 00000000000..d59ecbb2b79 --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parsepath.go @@ -0,0 +1,65 @@ +package parseutil + +import ( + "errors" + "fmt" + "io/ioutil" + "net/url" + "os" + "strings" +) + +var ( + ErrNotAUrl = errors.New("not a url") + ErrNotParsed = errors.New("not a parsed value") +) + +// ParsePath parses a URL with schemes file://, env://, or any other. Depending +// on the scheme it will return specific types of data: +// +// * file:// will return a string with the file's contents +// +// * env:// will return a string with the env var's contents +// +// * Anything else will return the string as it was. Functionally this means +// anything for which Go's `url.Parse` function does not throw an error. If you +// want to ensure that this function errors if a known scheme is not found, use +// MustParsePath. +// +// On error, we return the original string along with the error. The caller can +// switch on errors.Is(err, ErrNotAUrl) to understand whether it was the parsing +// step that errored or something else (such as a file not found). 
This is
+// useful to attempt to read a non-URL string from some resource, but where the
+// original input may simply be a valid string of that type.
+func ParsePath(path string) (string, error) {
+	return parsePath(path, false)
+}
+
+// MustParsePath behaves like ParsePath but will return ErrNotParsed if the
+// URL's scheme is not one handled by this function (file:// or env://).
+func MustParsePath(path string) (string, error) {
+	return parsePath(path, true)
+}
+
+func parsePath(path string, mustParse bool) (string, error) {
+	path = strings.TrimSpace(path)
+	parsed, err := url.Parse(path)
+	if err != nil {
+		return path, fmt.Errorf("error parsing url (%q): %w", err.Error(), ErrNotAUrl)
+	}
+	switch parsed.Scheme {
+	case "file":
+		contents, err := ioutil.ReadFile(strings.TrimPrefix(path, "file://"))
+		if err != nil {
+			return path, fmt.Errorf("error reading file at %s: %w", path, err)
+		}
+		return strings.TrimSpace(string(contents)), nil
+	case "env":
+		return strings.TrimSpace(os.Getenv(strings.TrimPrefix(path, "env://"))), nil
+	default:
+		if mustParse {
+			return "", ErrNotParsed
+		}
+		return path, nil
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parseutil.go b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parseutil.go
new file mode 100644
index 00000000000..e469499bdcd
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parseutil.go
@@ -0,0 +1,502 @@
+package parseutil
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/go-secure-stdlib/strutil"
+	sockaddr "github.com/hashicorp/go-sockaddr"
+	"github.com/mitchellh/mapstructure"
+)
+
+var validCapacityString = regexp.MustCompile("^[\t ]*([0-9]+)[\t ]?([kmgtKMGT][iI]?[bB])?[\t ]*$")
+
+// ParseCapacityString parses a capacity string and returns the number of bytes it represents.
+// Capacity strings are things like 5gib or 10MB. Supported suffixes are kb, kib, mb, mib, gb,
+// gib, tb, tib, which are not case sensitive. If no suffix is present, the number is assumed
+// to be in bytes already.
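+//
+// For example (illustrative): "5gib" yields 5 * 1024 * 1024 * 1024 bytes,
+// while "10MB" yields 10 * 1000 * 1000 bytes.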
+func ParseCapacityString(in interface{}) (uint64, error) { + var cap uint64 + + jsonIn, ok := in.(json.Number) + if ok { + in = jsonIn.String() + } + + switch inp := in.(type) { + case nil: + // return default of zero + case string: + if inp == "" { + return cap, nil + } + + matches := validCapacityString.FindStringSubmatch(inp) + + // no sub-groups means we couldn't parse it + if len(matches) <= 1 { + return cap, errors.New("could not parse capacity from input") + } + + var multiplier uint64 = 1 + switch strings.ToLower(matches[2]) { + case "kb": + multiplier = 1000 + case "kib": + multiplier = 1024 + case "mb": + multiplier = 1000 * 1000 + case "mib": + multiplier = 1024 * 1024 + case "gb": + multiplier = 1000 * 1000 * 1000 + case "gib": + multiplier = 1024 * 1024 * 1024 + case "tb": + multiplier = 1000 * 1000 * 1000 * 1000 + case "tib": + multiplier = 1024 * 1024 * 1024 * 1024 + } + + size, err := strconv.ParseUint(matches[1], 10, 64) + if err != nil { + return cap, err + } + + cap = size * multiplier + case int: + cap = uint64(inp) + case int32: + cap = uint64(inp) + case int64: + cap = uint64(inp) + case uint: + cap = uint64(inp) + case uint32: + cap = uint64(inp) + case uint64: + cap = uint64(inp) + case float32: + cap = uint64(inp) + case float64: + cap = uint64(inp) + default: + return cap, errors.New("could not parse capacity from input") + } + + return cap, nil +} + +// Parse a duration from an arbitrary value (a string or numeric value) into +// a time.Duration; when units are missing (such as when a numeric type is +// provided), the duration is assumed to be in seconds. +func ParseDurationSecond(in interface{}) (time.Duration, error) { + var dur time.Duration + jsonIn, ok := in.(json.Number) + if ok { + in = jsonIn.String() + } + switch inp := in.(type) { + case nil: + // return default of zero + case string: + if inp == "" { + return dur, nil + } + + if v, err := strconv.ParseInt(inp, 10, 64); err == nil { + return time.Duration(v) * time.Second, nil + } + + if strings.HasSuffix(inp, "d") { + v, err := strconv.ParseInt(inp[:len(inp)-1], 10, 64) + if err != nil { + return dur, err + } + return time.Duration(v) * 24 * time.Hour, nil + } + + var err error + if dur, err = time.ParseDuration(inp); err != nil { + return dur, err + } + case int: + dur = time.Duration(inp) * time.Second + case int32: + dur = time.Duration(inp) * time.Second + case int64: + dur = time.Duration(inp) * time.Second + case uint: + dur = time.Duration(inp) * time.Second + case uint32: + dur = time.Duration(inp) * time.Second + case uint64: + dur = time.Duration(inp) * time.Second + case float32: + dur = time.Duration(inp) * time.Second + case float64: + dur = time.Duration(inp) * time.Second + case time.Duration: + dur = inp + default: + return 0, errors.New("could not parse duration from input") + } + + return dur, nil +} + +// Parse an absolute timestamp from the provided arbitrary value (string or +// numeric value). When an untyped numeric value is provided, it is assumed +// to be seconds from the Unix Epoch. +func ParseAbsoluteTime(in interface{}) (time.Time, error) { + var t time.Time + switch inp := in.(type) { + case nil: + // return default of zero + return t, nil + case string: + // Allow RFC3339 with nanoseconds, or without, + // or an epoch time as an integer. 
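+		// e.g. "2023-01-01T00:00:00.000000001Z", "2023-01-01T00:00:00Z",
+		// or "1672531200".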
+		var err error
+		t, err = time.Parse(time.RFC3339Nano, inp)
+		if err == nil {
+			break
+		}
+		t, err = time.Parse(time.RFC3339, inp)
+		if err == nil {
+			break
+		}
+		epochTime, err := strconv.ParseInt(inp, 10, 64)
+		if err == nil {
+			t = time.Unix(epochTime, 0)
+			break
+		}
+		return t, errors.New("could not parse string as date and time")
+	case json.Number:
+		epochTime, err := inp.Int64()
+		if err != nil {
+			return t, err
+		}
+		t = time.Unix(epochTime, 0)
+	case int:
+		t = time.Unix(int64(inp), 0)
+	case int32:
+		t = time.Unix(int64(inp), 0)
+	case int64:
+		t = time.Unix(inp, 0)
+	case uint:
+		t = time.Unix(int64(inp), 0)
+	case uint32:
+		t = time.Unix(int64(inp), 0)
+	case uint64:
+		t = time.Unix(int64(inp), 0)
+	default:
+		return t, errors.New("could not parse time from input type")
+	}
+	return t, nil
+}
+
+// ParseInt takes an arbitrary value (either a string or numeric type) and
+// parses it as an int64 value. int64 is assumed to be large enough to hold
+// the provided value; it cannot always safely be cast to a smaller type.
+//
+// When the end value is bounded (such as an int value), it is recommended
+// to instead call SafeParseInt or SafeParseIntRange to safely cast to a
+// more restrictive type.
+func ParseInt(in interface{}) (int64, error) {
+	var ret int64
+	jsonIn, ok := in.(json.Number)
+	if ok {
+		in = jsonIn.String()
+	}
+	switch in.(type) {
+	case string:
+		inp := in.(string)
+		if inp == "" {
+			return 0, nil
+		}
+		var err error
+		left, err := strconv.ParseInt(inp, 10, 64)
+		if err != nil {
+			return ret, err
+		}
+		ret = left
+	case int:
+		ret = int64(in.(int))
+	case int32:
+		ret = int64(in.(int32))
+	case int64:
+		ret = in.(int64)
+	case uint:
+		ret = int64(in.(uint))
+	case uint32:
+		ret = int64(in.(uint32))
+	case uint64:
+		ret = int64(in.(uint64))
+	default:
+		return 0, errors.New("could not parse value from input")
+	}
+
+	return ret, nil
+}
+
+// ParseDirectIntSlice behaves similarly to ParseInt, but accepts typed
+// slices, returning a slice of int64s.
+//
+// If the starting value may not be in slice form (e.g. a bare numeric value
+// could be provided), it is suggested to call ParseIntSlice instead.
+func ParseDirectIntSlice(in interface{}) ([]int64, error) {
+	var ret []int64
+
+	switch in.(type) {
+	case []int:
+		for _, v := range in.([]int) {
+			ret = append(ret, int64(v))
+		}
+	case []int32:
+		for _, v := range in.([]int32) {
+			ret = append(ret, int64(v))
+		}
+	case []int64:
+		// For consistency to ensure callers can always modify ret without
+		// impacting in.
+		for _, v := range in.([]int64) {
+			ret = append(ret, v)
+		}
+	case []uint:
+		for _, v := range in.([]uint) {
+			ret = append(ret, int64(v))
+		}
+	case []uint32:
+		for _, v := range in.([]uint32) {
+			ret = append(ret, int64(v))
+		}
+	case []uint64:
+		for _, v := range in.([]uint64) {
+			ret = append(ret, int64(v))
+		}
+	case []json.Number:
+		for _, v := range in.([]json.Number) {
+			element, err := ParseInt(v)
+			if err != nil {
+				return nil, err
+			}
+			ret = append(ret, element)
+		}
+	case []string:
+		for _, v := range in.([]string) {
+			element, err := ParseInt(v)
+			if err != nil {
+				return nil, err
+			}
+			ret = append(ret, element)
+		}
+	default:
+		return nil, errors.New("could not parse value from input")
+	}
+
+	return ret, nil
+}
+
+// ParseIntSlice is a helper function for handling upgrades of optional
+// slices; that is, if the API accepts a type similar to int, []int, or
+// string, nicely handle the common cases of providing only an int-ish,
+// providing an actual slice of int-ishes, or providing a comma-separated
+// list of numbers.
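+//
+// For example (illustrative): 5, []int{5}, and "4,5,6" parse to []int64{5},
+// []int64{5}, and []int64{4, 5, 6} respectively.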
+// +// When []int64 is not the desired final type (or the values should be +// range-bound), it is suggested to call SafeParseIntSlice or +// SafeParseIntSliceRange instead. +func ParseIntSlice(in interface{}) ([]int64, error) { + if ret, err := ParseInt(in); err == nil { + return []int64{ret}, nil + } + + if ret, err := ParseDirectIntSlice(in); err == nil { + return ret, nil + } + + if strings, err := ParseCommaStringSlice(in); err == nil { + var ret []int64 + for _, v := range strings { + if v == "" { + // Ignore empty fields + continue + } + + element, err := ParseInt(v) + if err != nil { + return nil, err + } + ret = append(ret, element) + } + + return ret, nil + } + + return nil, errors.New("could not parse value from input") +} + +// Parses the provided arbitrary value as a boolean-like value. +func ParseBool(in interface{}) (bool, error) { + var result bool + if err := mapstructure.WeakDecode(in, &result); err != nil { + return false, err + } + return result, nil +} + +// Parses the provided arbitrary value as a string. +func ParseString(in interface{}) (string, error) { + var result string + if err := mapstructure.WeakDecode(in, &result); err != nil { + return "", err + } + return result, nil +} + +// Parses the provided string-like value as a comma-separated list of values. +func ParseCommaStringSlice(in interface{}) ([]string, error) { + jsonIn, ok := in.(json.Number) + if ok { + in = jsonIn.String() + } + + rawString, ok := in.(string) + if ok && rawString == "" { + return []string{}, nil + } + var result []string + config := &mapstructure.DecoderConfig{ + Result: &result, + WeaklyTypedInput: true, + DecodeHook: mapstructure.StringToSliceHookFunc(","), + } + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return nil, err + } + if err := decoder.Decode(in); err != nil { + return nil, err + } + return strutil.TrimStrings(result), nil +} + +// Parses the specified value as one or more addresses, separated by commas. +func ParseAddrs(addrs interface{}) ([]*sockaddr.SockAddrMarshaler, error) { + out := make([]*sockaddr.SockAddrMarshaler, 0) + stringAddrs := make([]string, 0) + + switch addrs.(type) { + case string: + stringAddrs = strutil.ParseArbitraryStringSlice(addrs.(string), ",") + if len(stringAddrs) == 0 { + return nil, fmt.Errorf("unable to parse addresses from %v", addrs) + } + + case []string: + stringAddrs = addrs.([]string) + + case []interface{}: + for _, v := range addrs.([]interface{}) { + stringAddr, ok := v.(string) + if !ok { + return nil, fmt.Errorf("error parsing %v as string", v) + } + stringAddrs = append(stringAddrs, stringAddr) + } + + default: + return nil, fmt.Errorf("unknown address input type %T", addrs) + } + + for _, addr := range stringAddrs { + sa, err := sockaddr.NewSockAddr(addr) + if err != nil { + return nil, fmt.Errorf("error parsing address %q: %w", addr, err) + } + out = append(out, &sockaddr.SockAddrMarshaler{ + SockAddr: sa, + }) + } + + return out, nil +} + +// Parses the provided arbitrary value (see ParseInt), ensuring it is within +// the specified range (inclusive of bounds). If this range corresponds to a +// smaller type, the returned value can then be safely cast without risking +// overflow. 
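+//
+// For example (illustrative): SafeParseIntRange("300", 0, 255) returns an
+// error, while SafeParseIntRange("200", 0, 255) returns 200.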
+func SafeParseIntRange(in interface{}, min int64, max int64) (int64, error) {
+	raw, err := ParseInt(in)
+	if err != nil {
+		return 0, err
+	}
+
+	if raw < min || raw > max {
+		return 0, fmt.Errorf("error parsing int value; out of range [%v to %v]: %v", min, max, raw)
+	}
+
+	return raw, nil
+}
+
+// Parses the specified arbitrary value (see ParseInt), ensuring that the
+// resulting value is within the range for an int value. If no error occurred,
+// the caller knows no overflow occurred.
+func SafeParseInt(in interface{}) (int, error) {
+	raw, err := SafeParseIntRange(in, math.MinInt, math.MaxInt)
+	return int(raw), err
+}
+
+// Parses the provided arbitrary value (see ParseIntSlice) into a slice of
+// int64 values, ensuring each is within the specified range (inclusive of
+// bounds). If this range corresponds to a smaller type, the returned value
+// can then be safely cast without risking overflow.
+//
+// If elements is positive, it is used to ensure the resulting slice is
+// bounded above by that many elements (inclusive).
+func SafeParseIntSliceRange(in interface{}, minValue int64, maxValue int64, elements int) ([]int64, error) {
+	raw, err := ParseIntSlice(in)
+	if err != nil {
+		return nil, err
+	}
+
+	if elements > 0 && len(raw) > elements {
+		return nil, fmt.Errorf("error parsing value from input: got %v but expected at most %v elements", len(raw), elements)
+	}
+
+	for index, value := range raw {
+		if value < minValue || value > maxValue {
+			return nil, fmt.Errorf("error parsing value from input: element %v was outside of range [%v to %v]: %v", index, minValue, maxValue, value)
+		}
+	}
+
+	return raw, nil
+}
+
+// Parses the provided arbitrary value (see ParseIntSlice) into a slice of
+// int values, ensuring each resulting value in the slice is within the
+// range for an int value. If no error occurred, the caller knows no overflow
+// occurred.
+//
+// If elements is positive, it is used to ensure the resulting slice is
+// bounded above by that many elements (inclusive).
+func SafeParseIntSlice(in interface{}, elements int) ([]int, error) {
+	raw, err := SafeParseIntSliceRange(in, math.MinInt, math.MaxInt, elements)
+	if err != nil || raw == nil {
+		return nil, err
+	}
+
+	var result = make([]int, 0, len(raw))
+	for _, element := range raw {
+		result = append(result, int(element))
+	}
+
+	return result, nil
+}
diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/strutil/LICENSE b/vendor/github.com/hashicorp/go-secure-stdlib/strutil/LICENSE
new file mode 100644
index 00000000000..e87a115e462
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-secure-stdlib/strutil/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+     means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+     means
+
+     a.
that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary
+      Licenses If You choose to distribute Source Code Form that is
+      Incompatible With Secondary Licenses under the terms of this version of
+      the License, the notice described in Exhibit B of this License must be
+      attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/strutil/strutil.go b/vendor/github.com/hashicorp/go-secure-stdlib/strutil/strutil.go
new file mode 100644
index 00000000000..102462dc60e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-secure-stdlib/strutil/strutil.go
@@ -0,0 +1,510 @@
+package strutil
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"sort"
+	"strings"
+	"unicode"
+
+	glob "github.com/ryanuber/go-glob"
+)
+
+// StrListContainsGlob looks for a string in a list of strings and allows
+// globs.
+func StrListContainsGlob(haystack []string, needle string) bool {
+	for _, item := range haystack {
+		if glob.Glob(item, needle) {
+			return true
+		}
+	}
+	return false
+}
+
+// StrListContains looks for a string in a list of strings.
+func StrListContains(haystack []string, needle string) bool {
+	for _, item := range haystack {
+		if item == needle {
+			return true
+		}
+	}
+	return false
+}
+
+// StrListContainsCaseInsensitive looks for a string in a list of strings,
+// ignoring case.
+func StrListContainsCaseInsensitive(haystack []string, needle string) bool {
+	for _, item := range haystack {
+		if strings.EqualFold(item, needle) {
+			return true
+		}
+	}
+	return false
+}
+
+// StrListSubset checks if a given list is a subset of another list.
+func StrListSubset(super, sub []string) bool {
+	for _, item := range sub {
+		if !StrListContains(super, item) {
+			return false
+		}
+	}
+	return true
+}
+
+// ParseDedupAndSortStrings parses a comma separated list of strings
+// into a slice of strings. The return slice will be sorted and will
+// not contain duplicate or empty items.
+func ParseDedupAndSortStrings(input string, sep string) []string {
+	input = strings.TrimSpace(input)
+	parsed := []string{}
+	if input == "" {
+		// Don't return nil
+		return parsed
+	}
+	return RemoveDuplicates(strings.Split(input, sep), false)
+}
+
+// ParseDedupLowercaseAndSortStrings parses a comma separated list of
+// strings into a slice of strings. The return slice will be sorted and
+// will not contain duplicate or empty items. The values will be converted
+// to lower case.
+func ParseDedupLowercaseAndSortStrings(input string, sep string) []string {
+	input = strings.TrimSpace(input)
+	parsed := []string{}
+	if input == "" {
+		// Don't return nil
+		return parsed
+	}
+	return RemoveDuplicates(strings.Split(input, sep), true)
+}
+
+// ParseKeyValues parses a comma separated list of `key=value` tuples
+// into a map[string]string.
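+//
+// For example (illustrative values): with input "ttl=60,renewable=true" and
+// sep ",", out gains the entries {"ttl": "60", "renewable": "true"}.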
+func ParseKeyValues(input string, out map[string]string, sep string) error {
+	if out == nil {
+		return fmt.Errorf("'out' is nil")
+	}
+
+	keyValues := ParseDedupLowercaseAndSortStrings(input, sep)
+	if len(keyValues) == 0 {
+		return nil
+	}
+
+	for _, keyValue := range keyValues {
+		shards := strings.Split(keyValue, "=")
+		if len(shards) != 2 {
+			return fmt.Errorf("invalid format")
+		}
+
+		key := strings.TrimSpace(shards[0])
+		value := strings.TrimSpace(shards[1])
+		if key == "" || value == "" {
+			return fmt.Errorf("invalid pair: key: %q value: %q", key, value)
+		}
+		out[key] = value
+	}
+	return nil
+}
+
+// ParseArbitraryKeyValues parses arbitrary key=value tuples. The input
+// can be one of the following:
+// * JSON string
+// * Base64 encoded JSON string
+// * Comma separated list of `key=value` pairs
+// * Base64 encoded string containing comma separated list of
+//   `key=value` pairs
+//
+// Input will be parsed into the output parameter, which should
+// be a non-nil map[string]string.
+func ParseArbitraryKeyValues(input string, out map[string]string, sep string) error {
+	input = strings.TrimSpace(input)
+	if input == "" {
+		return nil
+	}
+	if out == nil {
+		return fmt.Errorf("'out' is nil")
+	}
+
+	// Try to base64 decode the input. If successful, consider the decoded
+	// value as input.
+	inputBytes, err := base64.StdEncoding.DecodeString(input)
+	if err == nil {
+		input = string(inputBytes)
+	}
+
+	// Try to JSON unmarshal the input. If successful, consider that the
+	// metadata was supplied as JSON input.
+	err = json.Unmarshal([]byte(input), &out)
+	if err != nil {
+		// If JSON unmarshaling fails, consider that the input was
+		// supplied as a comma separated string of 'key=value' pairs.
+		if err = ParseKeyValues(input, out, sep); err != nil {
+			return fmt.Errorf("failed to parse the input: %w", err)
+		}
+	}
+
+	// Validate the parsed input
+	for key, value := range out {
+		if key != "" && value == "" {
+			return fmt.Errorf("invalid value for key %q", key)
+		}
+	}
+
+	return nil
+}
+
+// ParseStringSlice parses a `sep`-separated list of strings into a
+// []string with surrounding whitespace removed.
+//
+// The output will always be a valid slice but may be of length zero.
+func ParseStringSlice(input string, sep string) []string {
+	input = strings.TrimSpace(input)
+	if input == "" {
+		return []string{}
+	}
+
+	splitStr := strings.Split(input, sep)
+	ret := make([]string, len(splitStr))
+	for i, val := range splitStr {
+		ret[i] = strings.TrimSpace(val)
+	}
+
+	return ret
+}
+
+// ParseArbitraryStringSlice parses an arbitrary string slice. The input
+// can be one of the following:
+// * JSON string
+// * Base64 encoded JSON string
+// * `sep` separated list of values
+// * Base64-encoded string containing a `sep` separated list of values
+//
+// Note that the separator is ignored if the input is found to already be in a
+// structured format (e.g., JSON).
+//
+// The output will always be a valid slice but may be of length zero.
+func ParseArbitraryStringSlice(input string, sep string) []string {
+	input = strings.TrimSpace(input)
+	if input == "" {
+		return []string{}
+	}
+
+	// Try to base64 decode the input. If successful, consider the decoded
+	// value as input.
+	inputBytes, err := base64.StdEncoding.DecodeString(input)
+	if err == nil {
+		input = string(inputBytes)
+	}
+
+	ret := []string{}
+
+	// Try to JSON unmarshal the input. If successful, consider that the
+	// metadata was supplied as JSON input.
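+	// (Illustrative values: the JSON input `["a","b"]`, its base64
+	// encoding, and the plain string "a,b" with sep "," all yield
+	// []string{"a", "b"}.)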
+ err = json.Unmarshal([]byte(input), &ret) + if err != nil { + // If JSON unmarshaling fails, consider that the input was + // supplied as a separated string of values. + return ParseStringSlice(input, sep) + } + + if ret == nil { + return []string{} + } + + return ret +} + +// TrimStrings takes a slice of strings and returns a slice of strings +// with trimmed spaces +func TrimStrings(items []string) []string { + ret := make([]string, len(items)) + for i, item := range items { + ret[i] = strings.TrimSpace(item) + } + return ret +} + +// RemoveDuplicates removes duplicate and empty elements from a slice of +// strings. This also may convert the items in the slice to lower case and +// returns a sorted slice. +func RemoveDuplicates(items []string, lowercase bool) []string { + itemsMap := make(map[string]struct{}, len(items)) + for _, item := range items { + item = strings.TrimSpace(item) + if item == "" { + continue + } + if lowercase { + item = strings.ToLower(item) + } + itemsMap[item] = struct{}{} + } + items = make([]string, 0, len(itemsMap)) + for item := range itemsMap { + items = append(items, item) + } + sort.Strings(items) + return items +} + +// RemoveDuplicatesStable removes duplicate and empty elements from a slice of +// strings, preserving order (and case) of the original slice. +// In all cases, strings are compared after trimming whitespace +// If caseInsensitive, strings will be compared after ToLower() +func RemoveDuplicatesStable(items []string, caseInsensitive bool) []string { + itemsMap := make(map[string]struct{}, len(items)) + deduplicated := make([]string, 0, len(items)) + + for _, item := range items { + key := strings.TrimSpace(item) + if _, ok := itemsMap[key]; ok || key == "" { + continue + } + if caseInsensitive { + key = strings.ToLower(key) + } + if _, ok := itemsMap[key]; ok { + continue + } + itemsMap[key] = struct{}{} + deduplicated = append(deduplicated, item) + } + return deduplicated +} + +// RemoveEmpty removes empty elements from a slice of +// strings +func RemoveEmpty(items []string) []string { + if len(items) == 0 { + return items + } + itemsSlice := make([]string, 0, len(items)) + for _, item := range items { + if item == "" { + continue + } + itemsSlice = append(itemsSlice, item) + } + return itemsSlice +} + +// EquivalentSlices checks whether the given string sets are equivalent, as in, +// they contain the same values. +func EquivalentSlices(a, b []string) bool { + if a == nil && b == nil { + return true + } + + if a == nil || b == nil { + return false + } + + // First we'll build maps to ensure unique values + mapA := make(map[string]struct{}, len(a)) + mapB := make(map[string]struct{}, len(b)) + for _, keyA := range a { + mapA[keyA] = struct{}{} + } + for _, keyB := range b { + mapB[keyB] = struct{}{} + } + + // Now we'll build our checking slices + sortedA := make([]string, 0, len(mapA)) + sortedB := make([]string, 0, len(mapB)) + for keyA := range mapA { + sortedA = append(sortedA, keyA) + } + for keyB := range mapB { + sortedB = append(sortedB, keyB) + } + sort.Strings(sortedA) + sort.Strings(sortedB) + + // Finally, compare + if len(sortedA) != len(sortedB) { + return false + } + + for i := range sortedA { + if sortedA[i] != sortedB[i] { + return false + } + } + + return true +} + +// EqualStringMaps tests whether two map[string]string objects are equal. +// Equal means both maps have the same sets of keys and values. This function +// is 6-10x faster than a call to reflect.DeepEqual(). 
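+//
+// For example (illustrative values): two maps both equal to
+// map[string]string{"a": "1"} compare as true; changing either a key or a
+// value on one side makes the comparison false.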
+func EqualStringMaps(a, b map[string]string) bool { + if len(a) != len(b) { + return false + } + + for k := range a { + v, ok := b[k] + if !ok || a[k] != v { + return false + } + } + + return true +} + +// StrListDelete removes the first occurrence of the given item from the slice +// of strings if the item exists. +func StrListDelete(s []string, d string) []string { + if s == nil { + return s + } + + for index, element := range s { + if element == d { + return append(s[:index], s[index+1:]...) + } + } + + return s +} + +// GlobbedStringsMatch compares item to val with support for a leading and/or +// trailing wildcard '*' in item. +func GlobbedStringsMatch(item, val string) bool { + if len(item) < 2 { + return val == item + } + + hasPrefix := strings.HasPrefix(item, "*") + hasSuffix := strings.HasSuffix(item, "*") + + if hasPrefix && hasSuffix { + return strings.Contains(val, item[1:len(item)-1]) + } else if hasPrefix { + return strings.HasSuffix(val, item[1:]) + } else if hasSuffix { + return strings.HasPrefix(val, item[:len(item)-1]) + } + + return val == item +} + +// AppendIfMissing adds a string to a slice if the given string is not present +func AppendIfMissing(slice []string, i string) []string { + if StrListContains(slice, i) { + return slice + } + return append(slice, i) +} + +// MergeSlices adds an arbitrary number of slices together, uniquely +func MergeSlices(args ...[]string) []string { + all := map[string]struct{}{} + for _, slice := range args { + for _, v := range slice { + all[v] = struct{}{} + } + } + + result := make([]string, 0, len(all)) + for k := range all { + result = append(result, k) + } + sort.Strings(result) + return result +} + +// Difference returns the set difference (A - B) of the two given slices. The +// result will also remove any duplicated values in set A regardless of whether +// that matches any values in set B. +func Difference(a, b []string, lowercase bool) []string { + if len(a) == 0 { + return a + } + if len(b) == 0 { + if !lowercase { + return a + } + newA := make([]string, len(a)) + for i, v := range a { + newA[i] = strings.ToLower(v) + } + return newA + } + + a = RemoveDuplicates(a, lowercase) + b = RemoveDuplicates(b, lowercase) + + itemsMap := map[string]struct{}{} + for _, aVal := range a { + itemsMap[aVal] = struct{}{} + } + + // Perform difference calculation + for _, bVal := range b { + if _, ok := itemsMap[bVal]; ok { + delete(itemsMap, bVal) + } + } + + items := []string{} + for item := range itemsMap { + items = append(items, item) + } + sort.Strings(items) + return items +} + +// GetString attempts to retrieve a value from the provided map and assert that it is a string. If the key does not +// exist in the map, this will return an empty string. If the key exists, but the value is not a string type, this will +// return an error. 
If no map or key is provided, this will return an error.
+func GetString(m map[string]interface{}, key string) (string, error) {
+	if m == nil {
+		return "", fmt.Errorf("missing map")
+	}
+	if key == "" {
+		return "", fmt.Errorf("missing key")
+	}
+
+	rawVal, ok := m[key]
+	if !ok {
+		return "", nil
+	}
+
+	str, ok := rawVal.(string)
+	if !ok {
+		return "", fmt.Errorf("invalid value at %s: is a %T", key, rawVal)
+	}
+	return str, nil
+}
+
+// Printable returns true if all characters in the string are printable
+// according to Unicode
+func Printable(s string) bool {
+	return strings.IndexFunc(s, func(c rune) bool {
+		return !unicode.IsPrint(c)
+	}) == -1
+}
+
+// StringListToInterfaceList simply takes a []string and turns it into a
+// []interface{} to satisfy the input requirements for other library functions
+func StringListToInterfaceList(in []string) []interface{} {
+	ret := make([]interface{}, len(in))
+	for i, v := range in {
+		ret[i] = v
+	}
+	return ret
+}
+
+// Reverse reverses the input string
+func Reverse(in string) string {
+	l := len(in)
+	out := make([]byte, l)
+	for i := 0; i <= l/2; i++ {
+		out[i], out[l-1-i] = in[l-1-i], in[i]
+	}
+	return string(out)
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/.gitignore b/vendor/github.com/hashicorp/go-sockaddr/.gitignore
new file mode 100644
index 00000000000..41720b86e3e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/.gitignore
@@ -0,0 +1,26 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+.cover.out*
+coverage.html
diff --git a/vendor/github.com/hashicorp/go-sockaddr/GNUmakefile b/vendor/github.com/hashicorp/go-sockaddr/GNUmakefile
new file mode 100644
index 00000000000..0f3ae1661e2
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/GNUmakefile
@@ -0,0 +1,65 @@
+TOOLS= golang.org/x/tools/cover
+GOCOVER_TMPFILE?= $(GOCOVER_FILE).tmp
+GOCOVER_FILE?= .cover.out
+GOCOVERHTML?= coverage.html
+FIND=`/usr/bin/which 2> /dev/null gfind find | /usr/bin/grep -v ^no | /usr/bin/head -n 1`
+XARGS=`/usr/bin/which 2> /dev/null gxargs xargs | /usr/bin/grep -v ^no | /usr/bin/head -n 1`
+
+test:: $(GOCOVER_FILE)
+	@$(MAKE) -C cmd/sockaddr test
+
+cover:: coverage_report
+
+$(GOCOVER_FILE)::
+	@${FIND} . -type d ! -path '*cmd*' ! -path '*.git*' -print0 | ${XARGS} -0 -I % sh -ec "cd % && rm -f $(GOCOVER_TMPFILE) && go test -coverprofile=$(GOCOVER_TMPFILE)"
+
+	@echo 'mode: set' > $(GOCOVER_FILE)
+	@${FIND} . -type f ! -path '*cmd*' !
-path '*.git*' -name "$(GOCOVER_TMPFILE)" -print0 | ${XARGS} -0 -n1 cat $(GOCOVER_TMPFILE) | grep -v '^mode: ' >> ${PWD}/$(GOCOVER_FILE) + +$(GOCOVERHTML): $(GOCOVER_FILE) + go tool cover -html=$(GOCOVER_FILE) -o $(GOCOVERHTML) + +coverage_report:: $(GOCOVER_FILE) + go tool cover -html=$(GOCOVER_FILE) + +audit_tools:: + @go get -u github.com/golang/lint/golint && echo "Installed golint:" + @go get -u github.com/fzipp/gocyclo && echo "Installed gocyclo:" + @go get -u github.com/remyoudompheng/go-misc/deadcode && echo "Installed deadcode:" + @go get -u github.com/client9/misspell/cmd/misspell && echo "Installed misspell:" + @go get -u github.com/gordonklaus/ineffassign && echo "Installed ineffassign:" + +audit:: + deadcode + go tool vet -all *.go + go tool vet -shadow=true *.go + golint *.go + ineffassign . + gocyclo -over 65 *.go + misspell *.go + +clean:: + rm -f $(GOCOVER_FILE) $(GOCOVERHTML) + +dev:: + @go build + @$(MAKE) -B -C cmd/sockaddr sockaddr + +install:: + @go install + @$(MAKE) -C cmd/sockaddr install + +doc:: + @echo Visit: http://127.0.0.1:6161/pkg/github.com/hashicorp/go-sockaddr/ + godoc -http=:6161 -goroot $GOROOT + +world:: + @set -e; \ + for os in solaris darwin freebsd linux windows android; do \ + for arch in amd64; do \ + printf "Building on %s-%s\n" "$${os}" "$${arch}" ; \ + env GOOS="$${os}" GOARCH="$${arch}" go build -o /dev/null; \ + done; \ + done + + $(MAKE) -C cmd/sockaddr world diff --git a/vendor/github.com/hashicorp/go-sockaddr/LICENSE b/vendor/github.com/hashicorp/go-sockaddr/LICENSE new file mode 100644 index 00000000000..a612ad9813b --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. 
"Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. 
You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. 
No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. 
+ +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-sockaddr/README.md b/vendor/github.com/hashicorp/go-sockaddr/README.md new file mode 100644 index 00000000000..a2e170ae09c --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/README.md @@ -0,0 +1,118 @@ +# go-sockaddr + +## `sockaddr` Library + +Socket address convenience functions for Go. `go-sockaddr` is a convenience +library that makes doing the right thing with IP addresses easy. `go-sockaddr` +is loosely modeled after the UNIX `sockaddr_t` and creates a union of the family +of `sockaddr_t` types (see below for an ascii diagram). Library documentation +is available +at +[https://godoc.org/github.com/hashicorp/go-sockaddr](https://godoc.org/github.com/hashicorp/go-sockaddr). +The primary intent of the library was to make it possible to define heuristics +for selecting the correct IP addresses when a configuration is evaluated at +runtime. See +the +[docs](https://godoc.org/github.com/hashicorp/go-sockaddr), +[`template` package](https://godoc.org/github.com/hashicorp/go-sockaddr/template), +tests, +and +[CLI utility](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr) +for details and hints as to how to use this library. 
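+
+As a minimal sketch of end-to-end library usage (using one of the helper
+functions described below; the printed address is illustrative):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	sockaddr "github.com/hashicorp/go-sockaddr"
+)
+
+func main() {
+	// First RFC 6890 (private) IP address found on an interface with a
+	// default route; empty string if none was found.
+	ip, err := sockaddr.GetPrivateIP()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(ip) // e.g. "192.168.1.22"
+}
+```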
+
+For example, with this library it is possible to find an IP address that:
+
+* is attached to a default route
+  ([`GetDefaultInterfaces()`](https://godoc.org/github.com/hashicorp/go-sockaddr#GetDefaultInterfaces))
+* is contained within a CIDR block ([`IfByNetwork()`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByNetwork))
+* is an RFC1918 address
+  ([`IfByRFC("1918")`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByRFC))
+* is ordered
+  ([`OrderedIfAddrBy(args)`](https://godoc.org/github.com/hashicorp/go-sockaddr#OrderedIfAddrBy) where
+  `args` includes, but is not limited to,
+  [`AscIfType`](https://godoc.org/github.com/hashicorp/go-sockaddr#AscIfType) and
+  [`AscNetworkSize`](https://godoc.org/github.com/hashicorp/go-sockaddr#AscNetworkSize))
+* excludes all IPv6 addresses
+  ([`IfByType("^(IPv4)$")`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByType))
+* is larger than a `/32`
+  ([`IfByMaskSize(32)`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByMaskSize))
+* is not on a `down` interface
+  ([`ExcludeIfs("flags", "down")`](https://godoc.org/github.com/hashicorp/go-sockaddr#ExcludeIfs))
+* prefers an IPv6 address over an IPv4 address
+  ([`SortIfByType()`](https://godoc.org/github.com/hashicorp/go-sockaddr#SortIfByType) +
+  [`ReverseIfAddrs()`](https://godoc.org/github.com/hashicorp/go-sockaddr#ReverseIfAddrs)); and
+* excludes any IP in an RFC 6890 range
+  ([`IfByRFC("6890")`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByRFC))
+
+Or any combination or variation therein.
+
+There are also a few simple helper functions such as `GetPublicIP` and
+`GetPrivateIP` which both return strings and select the first public or private
+IP address on the default interface, respectively. Similarly, there is also a
+helper function called `GetInterfaceIP` which returns the first usable IP
+address on the named interface.
+
+## `sockaddr` CLI
+
+Given the possible complexity of the `sockaddr` library, there is a CLI utility
+that accompanies the library, also called
+[`sockaddr`](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr).
+The [`sockaddr`](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr)
+utility exposes nearly all of the functionality of the library and can be used
+either as an administrative tool or a testing tool.
To install +the +[`sockaddr`](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr), +run: + +```text +$ go get -u github.com/hashicorp/go-sockaddr/cmd/sockaddr +``` + +If you're familiar with UNIX's `sockaddr` struct's, the following diagram +mapping the C `sockaddr` (top) to `go-sockaddr` structs (bottom) and +interfaces will be helpful: + +``` ++-------------------------------------------------------+ +| | +| sockaddr | +| SockAddr | +| | +| +--------------+ +----------------------------------+ | +| | sockaddr_un | | | | +| | SockAddrUnix | | sockaddr_in{,6} | | +| +--------------+ | IPAddr | | +| | | | +| | +-------------+ +--------------+ | | +| | | sockaddr_in | | sockaddr_in6 | | | +| | | IPv4Addr | | IPv6Addr | | | +| | +-------------+ +--------------+ | | +| | | | +| +----------------------------------+ | +| | ++-------------------------------------------------------+ +``` + +## Inspiration and Design + +There were many subtle inspirations that led to this design, but the most direct +inspiration for the filtering syntax was +OpenBSD's +[`pf.conf(5)`](https://www.freebsd.org/cgi/man.cgi?query=pf.conf&apropos=0&sektion=0&arch=default&format=html#PARAMETERS) firewall +syntax that lets you select the first IP address on a given named interface. +The original problem stemmed from: + +* needing to create immutable images using [Packer](https://www.packer.io) that + ran the [Consul](https://www.consul.io) process (Consul can only use one IP + address at a time); +* images that may or may not have multiple interfaces or IP addresses at + runtime; and +* we didn't want to rely on configuration management to render out the correct + IP address if the VM image was being used in an auto-scaling group. + +Instead we needed some way to codify a heuristic that would correctly select the +right IP address but the input parameters were not known when the image was +created. diff --git a/vendor/github.com/hashicorp/go-sockaddr/doc.go b/vendor/github.com/hashicorp/go-sockaddr/doc.go new file mode 100644 index 00000000000..90671deb51d --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/doc.go @@ -0,0 +1,5 @@ +/* +Package sockaddr is a Go implementation of the UNIX socket family data types and +related helper functions. +*/ +package sockaddr diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifaddr.go b/vendor/github.com/hashicorp/go-sockaddr/ifaddr.go new file mode 100644 index 00000000000..0811b275990 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ifaddr.go @@ -0,0 +1,254 @@ +package sockaddr + +import "strings" + +// ifAddrAttrMap is a map of the IfAddr type-specific attributes. +var ifAddrAttrMap map[AttrName]func(IfAddr) string +var ifAddrAttrs []AttrName + +func init() { + ifAddrAttrInit() +} + +// GetPrivateIP returns a string with a single IP address that is part of RFC +// 6890 and has a default route. If the system can't determine its IP address +// or find an RFC 6890 IP address, an empty string will be returned instead. 
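+// For example (illustrative): on a host whose default-route interface holds
+// 192.168.1.22, GetPrivateIP returns "192.168.1.22".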
+// This function is the `eval` equivalent of: +// +// ``` +// $ sockaddr eval -r '{{GetPrivateInterfaces | attr "address"}}' +/// ``` +func GetPrivateIP() (string, error) { + privateIfs, err := GetPrivateInterfaces() + if err != nil { + return "", err + } + if len(privateIfs) < 1 { + return "", nil + } + + ifAddr := privateIfs[0] + ip := *ToIPAddr(ifAddr.SockAddr) + return ip.NetIP().String(), nil +} + +// GetPrivateIPs returns a string with all IP addresses that are part of RFC +// 6890 (regardless of whether or not there is a default route, unlike +// GetPublicIP). If the system can't find any RFC 6890 IP addresses, an empty +// string will be returned instead. This function is the `eval` equivalent of: +// +// ``` +// $ sockaddr eval -r '{{GetAllInterfaces | include "RFC" "6890" | join "address" " "}}' +/// ``` +func GetPrivateIPs() (string, error) { + ifAddrs, err := GetAllInterfaces() + if err != nil { + return "", err + } else if len(ifAddrs) < 1 { + return "", nil + } + + ifAddrs, _ = FilterIfByType(ifAddrs, TypeIP) + if len(ifAddrs) == 0 { + return "", nil + } + + OrderedIfAddrBy(AscIfType, AscIfNetworkSize).Sort(ifAddrs) + + ifAddrs, _, err = IfByRFC("6890", ifAddrs) + if err != nil { + return "", err + } else if len(ifAddrs) == 0 { + return "", nil + } + + _, ifAddrs, err = IfByRFC(ForwardingBlacklistRFC, ifAddrs) + if err != nil { + return "", err + } else if len(ifAddrs) == 0 { + return "", nil + } + + ips := make([]string, 0, len(ifAddrs)) + for _, ifAddr := range ifAddrs { + ip := *ToIPAddr(ifAddr.SockAddr) + s := ip.NetIP().String() + ips = append(ips, s) + } + + return strings.Join(ips, " "), nil +} + +// GetPublicIP returns a string with a single IP address that is NOT part of RFC +// 6890 and has a default route. If the system can't determine its IP address +// or find a non RFC 6890 IP address, an empty string will be returned instead. +// This function is the `eval` equivalent of: +// +// ``` +// $ sockaddr eval -r '{{GetPublicInterfaces | attr "address"}}' +/// ``` +func GetPublicIP() (string, error) { + publicIfs, err := GetPublicInterfaces() + if err != nil { + return "", err + } else if len(publicIfs) < 1 { + return "", nil + } + + ifAddr := publicIfs[0] + ip := *ToIPAddr(ifAddr.SockAddr) + return ip.NetIP().String(), nil +} + +// GetPublicIPs returns a string with all IP addresses that are NOT part of RFC +// 6890 (regardless of whether or not there is a default route, unlike +// GetPublicIP). If the system can't find any non RFC 6890 IP addresses, an +// empty string will be returned instead. This function is the `eval` +// equivalent of: +// +// ``` +// $ sockaddr eval -r '{{GetAllInterfaces | exclude "RFC" "6890" | join "address" " "}}' +/// ``` +func GetPublicIPs() (string, error) { + ifAddrs, err := GetAllInterfaces() + if err != nil { + return "", err + } else if len(ifAddrs) < 1 { + return "", nil + } + + ifAddrs, _ = FilterIfByType(ifAddrs, TypeIP) + if len(ifAddrs) == 0 { + return "", nil + } + + OrderedIfAddrBy(AscIfType, AscIfNetworkSize).Sort(ifAddrs) + + _, ifAddrs, err = IfByRFC("6890", ifAddrs) + if err != nil { + return "", err + } else if len(ifAddrs) == 0 { + return "", nil + } + + ips := make([]string, 0, len(ifAddrs)) + for _, ifAddr := range ifAddrs { + ip := *ToIPAddr(ifAddr.SockAddr) + s := ip.NetIP().String() + ips = append(ips, s) + } + + return strings.Join(ips, " "), nil +} + +// GetInterfaceIP returns a string with a single IP address sorted by the size +// of the network (i.e. 
IP addresses with a smaller netmask, larger network +// size, are sorted first). This function is the `eval` equivalent of: +// +// ``` +// $ sockaddr eval -r '{{GetAllInterfaces | include "name" <> | sort "type,size" | include "flag" "forwardable" | attr "address" }}' +/// ``` +func GetInterfaceIP(namedIfRE string) (string, error) { + ifAddrs, err := GetAllInterfaces() + if err != nil { + return "", err + } + + ifAddrs, _, err = IfByName(namedIfRE, ifAddrs) + if err != nil { + return "", err + } + + ifAddrs, _, err = IfByFlag("forwardable", ifAddrs) + if err != nil { + return "", err + } + + ifAddrs, err = SortIfBy("+type,+size", ifAddrs) + if err != nil { + return "", err + } + + if len(ifAddrs) == 0 { + return "", err + } + + ip := ToIPAddr(ifAddrs[0].SockAddr) + if ip == nil { + return "", err + } + + return IPAddrAttr(*ip, "address"), nil +} + +// GetInterfaceIPs returns a string with all IPs, sorted by the size of the +// network (i.e. IP addresses with a smaller netmask, larger network size, are +// sorted first), on a named interface. This function is the `eval` equivalent +// of: +// +// ``` +// $ sockaddr eval -r '{{GetAllInterfaces | include "name" <> | sort "type,size" | join "address" " "}}' +/// ``` +func GetInterfaceIPs(namedIfRE string) (string, error) { + ifAddrs, err := GetAllInterfaces() + if err != nil { + return "", err + } + + ifAddrs, _, err = IfByName(namedIfRE, ifAddrs) + if err != nil { + return "", err + } + + ifAddrs, err = SortIfBy("+type,+size", ifAddrs) + if err != nil { + return "", err + } + + if len(ifAddrs) == 0 { + return "", err + } + + ips := make([]string, 0, len(ifAddrs)) + for _, ifAddr := range ifAddrs { + ip := *ToIPAddr(ifAddr.SockAddr) + s := ip.NetIP().String() + ips = append(ips, s) + } + + return strings.Join(ips, " "), nil +} + +// IfAddrAttrs returns a list of attributes supported by the IfAddr type +func IfAddrAttrs() []AttrName { + return ifAddrAttrs +} + +// IfAddrAttr returns a string representation of an attribute for the given +// IfAddr. +func IfAddrAttr(ifAddr IfAddr, attrName AttrName) string { + fn, found := ifAddrAttrMap[attrName] + if !found { + return "" + } + + return fn(ifAddr) +} + +// ifAddrAttrInit is called once at init() +func ifAddrAttrInit() { + // Sorted for human readability + ifAddrAttrs = []AttrName{ + "flags", + "name", + } + + ifAddrAttrMap = map[AttrName]func(ifAddr IfAddr) string{ + "flags": func(ifAddr IfAddr) string { + return ifAddr.Interface.Flags.String() + }, + "name": func(ifAddr IfAddr) string { + return ifAddr.Interface.Name + }, + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go b/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go new file mode 100644 index 00000000000..80f61bef680 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go @@ -0,0 +1,1304 @@ +package sockaddr + +import ( + "encoding/binary" + "errors" + "fmt" + "math/big" + "net" + "regexp" + "sort" + "strconv" + "strings" +) + +var ( + // Centralize all regexps and regexp.Copy() where necessary. + signRE *regexp.Regexp = regexp.MustCompile(`^[\s]*[+-]`) + whitespaceRE *regexp.Regexp = regexp.MustCompile(`[\s]+`) + ifNameRE *regexp.Regexp = regexp.MustCompile(`^(?:Ethernet|Wireless LAN) adapter ([^:]+):`) + ipAddrRE *regexp.Regexp = regexp.MustCompile(`^ IPv[46] Address\. \. \. \. \. \. \. \. \. \. \. 
: ([^\s]+)`)
+)
+
+// IfAddrs is a slice of IfAddr
+type IfAddrs []IfAddr
+
+func (ifs IfAddrs) Len() int { return len(ifs) }
+
+// CmpIfAddrFunc is the function signature that must be met to be used in the
+// OrderedIfAddrBy multiIfAddrSorter
+type CmpIfAddrFunc func(p1, p2 *IfAddr) int
+
+// multiIfAddrSorter implements the Sort interface, sorting the IfAddrs within.
+type multiIfAddrSorter struct {
+	ifAddrs IfAddrs
+	cmp     []CmpIfAddrFunc
+}
+
+// Sort sorts the argument slice according to the Cmp functions passed to
+// OrderedIfAddrBy.
+func (ms *multiIfAddrSorter) Sort(ifAddrs IfAddrs) {
+	ms.ifAddrs = ifAddrs
+	sort.Sort(ms)
+}
+
+// OrderedIfAddrBy sorts an IfAddrs by the list of sort function pointers.
+func OrderedIfAddrBy(cmpFuncs ...CmpIfAddrFunc) *multiIfAddrSorter {
+	return &multiIfAddrSorter{
+		cmp: cmpFuncs,
+	}
+}
+
+// Len is part of sort.Interface.
+func (ms *multiIfAddrSorter) Len() int {
+	return len(ms.ifAddrs)
+}
+
+// Less is part of sort.Interface. It is implemented by looping along the Cmp()
+// functions until it finds a comparison that is either less than or greater
+// than. A return value of 0 defers sorting to the next function in the
+// multisorter (which means the results of sorting may be left in a
+// non-deterministic order).
+func (ms *multiIfAddrSorter) Less(i, j int) bool {
+	p, q := &ms.ifAddrs[i], &ms.ifAddrs[j]
+	// Try all but the last comparison.
+	var k int
+	for k = 0; k < len(ms.cmp)-1; k++ {
+		cmp := ms.cmp[k]
+		x := cmp(p, q)
+		switch x {
+		case -1:
+			// p < q, so we have a decision.
+			return true
+		case 1:
+			// p > q, so we have a decision.
+			return false
+		}
+		// p == q; try the next comparison.
+	}
+	// All comparisons to here said "equal", so just return whatever the
+	// final comparison reports.
+	switch ms.cmp[k](p, q) {
+	case -1:
+		return true
+	case 1:
+		return false
+	default:
+		// Still a tie! There is no defined sort order for the remaining
+		// items, so report "not less" and leave the order as-is.
+		return false
+	}
+}
+
+// Swap is part of sort.Interface.
+func (ms *multiIfAddrSorter) Swap(i, j int) {
+	ms.ifAddrs[i], ms.ifAddrs[j] = ms.ifAddrs[j], ms.ifAddrs[i]
+}
+
+// AscIfAddress is a sorting function to sort IfAddrs by their respective
+// addresses. Non-equal types are deferred in the sort.
+func AscIfAddress(p1Ptr, p2Ptr *IfAddr) int {
+	return AscAddress(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// AscIfDefault is a sorting function to sort IfAddrs by whether or not they
+// have a default route. Non-equal types are deferred in the sort.
+//
+// FIXME: This is a particularly expensive sorting operation because of the
+// non-memoized calls to NewRouteInfo(). In an ideal world the routeInfo data
+// would be computed once at the start of the sort and passed along as a
+// context or by wrapping the IfAddr type with this information (this would
+// also solve the inability to return errors and the possibility of failing
+// silently). Fortunately, N*log(N) where N = 3 is only ~6.2 invocations. Not
+// ideal, but not worth optimizing today. The common case is this gets called
+// once or twice. Patches welcome.
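+//
+// As an illustrative sketch, a typical composed sort (mirroring what
+// GetPrivateInterfaces does below) looks like:
+//
+//	OrderedIfAddrBy(AscIfDefault, AscIfType, AscIfNetworkSize).Sort(ifAddrs)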
+func AscIfDefault(p1Ptr, p2Ptr *IfAddr) int { + ri, err := NewRouteInfo() + if err != nil { + return sortDeferDecision + } + + defaultIfName, err := ri.GetDefaultInterfaceName() + if err != nil { + return sortDeferDecision + } + + switch { + case p1Ptr.Interface.Name == defaultIfName && p2Ptr.Interface.Name == defaultIfName: + return sortDeferDecision + case p1Ptr.Interface.Name == defaultIfName: + return sortReceiverBeforeArg + case p2Ptr.Interface.Name == defaultIfName: + return sortArgBeforeReceiver + default: + return sortDeferDecision + } +} + +// AscIfName is a sorting function to sort IfAddrs by their interface names. +func AscIfName(p1Ptr, p2Ptr *IfAddr) int { + return strings.Compare(p1Ptr.Name, p2Ptr.Name) +} + +// AscIfNetworkSize is a sorting function to sort IfAddrs by their respective +// network mask size. +func AscIfNetworkSize(p1Ptr, p2Ptr *IfAddr) int { + return AscNetworkSize(&p1Ptr.SockAddr, &p2Ptr.SockAddr) +} + +// AscIfPort is a sorting function to sort IfAddrs by their respective +// port type. Non-equal types are deferred in the sort. +func AscIfPort(p1Ptr, p2Ptr *IfAddr) int { + return AscPort(&p1Ptr.SockAddr, &p2Ptr.SockAddr) +} + +// AscIfPrivate is a sorting function to sort IfAddrs by "private" values before +// "public" values. Both IPv4 and IPv6 are compared against RFC6890 (RFC6890 +// includes, and is not limited to, RFC1918 and RFC6598 for IPv4, and IPv6 +// includes RFC4193). +func AscIfPrivate(p1Ptr, p2Ptr *IfAddr) int { + return AscPrivate(&p1Ptr.SockAddr, &p2Ptr.SockAddr) +} + +// AscIfType is a sorting function to sort IfAddrs by their respective address +// type. Non-equal types are deferred in the sort. +func AscIfType(p1Ptr, p2Ptr *IfAddr) int { + return AscType(&p1Ptr.SockAddr, &p2Ptr.SockAddr) +} + +// DescIfAddress is identical to AscIfAddress but reverse ordered. +func DescIfAddress(p1Ptr, p2Ptr *IfAddr) int { + return -1 * AscAddress(&p1Ptr.SockAddr, &p2Ptr.SockAddr) +} + +// DescIfDefault is identical to AscIfDefault but reverse ordered. +func DescIfDefault(p1Ptr, p2Ptr *IfAddr) int { + return -1 * AscIfDefault(p1Ptr, p2Ptr) +} + +// DescIfName is identical to AscIfName but reverse ordered. +func DescIfName(p1Ptr, p2Ptr *IfAddr) int { + return -1 * strings.Compare(p1Ptr.Name, p2Ptr.Name) +} + +// DescIfNetworkSize is identical to AscIfNetworkSize but reverse ordered. +func DescIfNetworkSize(p1Ptr, p2Ptr *IfAddr) int { + return -1 * AscNetworkSize(&p1Ptr.SockAddr, &p2Ptr.SockAddr) +} + +// DescIfPort is identical to AscIfPort but reverse ordered. +func DescIfPort(p1Ptr, p2Ptr *IfAddr) int { + return -1 * AscPort(&p1Ptr.SockAddr, &p2Ptr.SockAddr) +} + +// DescIfPrivate is identical to AscIfPrivate but reverse ordered. +func DescIfPrivate(p1Ptr, p2Ptr *IfAddr) int { + return -1 * AscPrivate(&p1Ptr.SockAddr, &p2Ptr.SockAddr) +} + +// DescIfType is identical to AscIfType but reverse ordered. +func DescIfType(p1Ptr, p2Ptr *IfAddr) int { + return -1 * AscType(&p1Ptr.SockAddr, &p2Ptr.SockAddr) +} + +// FilterIfByType filters IfAddrs and returns a list of the matching type +func FilterIfByType(ifAddrs IfAddrs, type_ SockAddrType) (matchedIfs, excludedIfs IfAddrs) { + excludedIfs = make(IfAddrs, 0, len(ifAddrs)) + matchedIfs = make(IfAddrs, 0, len(ifAddrs)) + + for _, ifAddr := range ifAddrs { + if ifAddr.SockAddr.Type()&type_ != 0 { + matchedIfs = append(matchedIfs, ifAddr) + } else { + excludedIfs = append(excludedIfs, ifAddr) + } + } + return matchedIfs, excludedIfs +} + +// IfAttr forwards the selector to IfAttr.Attr() for resolution. 
If there is +// more than one IfAddr, only the first IfAddr is used. +func IfAttr(selectorName string, ifAddr IfAddr) (string, error) { + attrName := AttrName(strings.ToLower(selectorName)) + attrVal, err := ifAddr.Attr(attrName) + return attrVal, err +} + +// IfAttrs forwards the selector to IfAttrs.Attr() for resolution. If there is +// more than one IfAddr, only the first IfAddr is used. +func IfAttrs(selectorName string, ifAddrs IfAddrs) (string, error) { + if len(ifAddrs) == 0 { + return "", nil + } + + attrName := AttrName(strings.ToLower(selectorName)) + attrVal, err := ifAddrs[0].Attr(attrName) + return attrVal, err +} + +// GetAllInterfaces iterates over all available network interfaces and finds all +// available IP addresses on each interface and converts them to +// sockaddr.IPAddrs, and returning the result as an array of IfAddr. +func GetAllInterfaces() (IfAddrs, error) { + ifs, err := net.Interfaces() + if err != nil { + return nil, err + } + + ifAddrs := make(IfAddrs, 0, len(ifs)) + for _, intf := range ifs { + addrs, err := intf.Addrs() + if err != nil { + return nil, err + } + + for _, addr := range addrs { + var ipAddr IPAddr + ipAddr, err = NewIPAddr(addr.String()) + if err != nil { + return IfAddrs{}, fmt.Errorf("unable to create an IP address from %q", addr.String()) + } + + ifAddr := IfAddr{ + SockAddr: ipAddr, + Interface: intf, + } + ifAddrs = append(ifAddrs, ifAddr) + } + } + + return ifAddrs, nil +} + +// GetDefaultInterfaces returns IfAddrs of the addresses attached to the default +// route. +func GetDefaultInterfaces() (IfAddrs, error) { + ri, err := NewRouteInfo() + if err != nil { + return nil, err + } + + defaultIfName, err := ri.GetDefaultInterfaceName() + if err != nil { + return nil, err + } + + var defaultIfs, ifAddrs IfAddrs + ifAddrs, err = GetAllInterfaces() + for _, ifAddr := range ifAddrs { + if ifAddr.Name == defaultIfName { + defaultIfs = append(defaultIfs, ifAddr) + } + } + + return defaultIfs, nil +} + +// GetPrivateInterfaces returns an IfAddrs that are part of RFC 6890 and have a +// default route. If the system can't determine its IP address or find an RFC +// 6890 IP address, an empty IfAddrs will be returned instead. This function is +// the `eval` equivalent of: +// +// ``` +// $ sockaddr eval -r '{{GetAllInterfaces | include "type" "ip" | include "flags" "forwardable" | include "flags" "up" | sort "default,type,size" | include "RFC" "6890" }}' +/// ``` +func GetPrivateInterfaces() (IfAddrs, error) { + privateIfs, err := GetAllInterfaces() + if err != nil { + return IfAddrs{}, err + } + if len(privateIfs) == 0 { + return IfAddrs{}, nil + } + + privateIfs, _ = FilterIfByType(privateIfs, TypeIP) + if len(privateIfs) == 0 { + return IfAddrs{}, nil + } + + privateIfs, _, err = IfByFlag("forwardable", privateIfs) + if err != nil { + return IfAddrs{}, err + } + + privateIfs, _, err = IfByFlag("up", privateIfs) + if err != nil { + return IfAddrs{}, err + } + + if len(privateIfs) == 0 { + return IfAddrs{}, nil + } + + OrderedIfAddrBy(AscIfDefault, AscIfType, AscIfNetworkSize).Sort(privateIfs) + + privateIfs, _, err = IfByRFC("6890", privateIfs) + if err != nil { + return IfAddrs{}, err + } else if len(privateIfs) == 0 { + return IfAddrs{}, nil + } + + return privateIfs, nil +} + +// GetPublicInterfaces returns an IfAddrs that are NOT part of RFC 6890 and has a +// default route. If the system can't determine its IP address or find a non +// RFC 6890 IP address, an empty IfAddrs will be returned instead. 
This +// function is the `eval` equivalent of: +// +// ``` +// $ sockaddr eval -r '{{GetAllInterfaces | include "type" "ip" | include "flags" "forwardable" | include "flags" "up" | sort "default,type,size" | exclude "RFC" "6890" }}' +/// ``` +func GetPublicInterfaces() (IfAddrs, error) { + publicIfs, err := GetAllInterfaces() + if err != nil { + return IfAddrs{}, err + } + if len(publicIfs) == 0 { + return IfAddrs{}, nil + } + + publicIfs, _ = FilterIfByType(publicIfs, TypeIP) + if len(publicIfs) == 0 { + return IfAddrs{}, nil + } + + publicIfs, _, err = IfByFlag("forwardable", publicIfs) + if err != nil { + return IfAddrs{}, err + } + + publicIfs, _, err = IfByFlag("up", publicIfs) + if err != nil { + return IfAddrs{}, err + } + + if len(publicIfs) == 0 { + return IfAddrs{}, nil + } + + OrderedIfAddrBy(AscIfDefault, AscIfType, AscIfNetworkSize).Sort(publicIfs) + + _, publicIfs, err = IfByRFC("6890", publicIfs) + if err != nil { + return IfAddrs{}, err + } else if len(publicIfs) == 0 { + return IfAddrs{}, nil + } + + return publicIfs, nil +} + +// IfByAddress returns a list of matched and non-matched IfAddrs, or an error if +// the regexp fails to compile. +func IfByAddress(inputRe string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { + re, err := regexp.Compile(inputRe) + if err != nil { + return nil, nil, fmt.Errorf("Unable to compile address regexp %+q: %v", inputRe, err) + } + + matchedAddrs := make(IfAddrs, 0, len(ifAddrs)) + excludedAddrs := make(IfAddrs, 0, len(ifAddrs)) + for _, addr := range ifAddrs { + if re.MatchString(addr.SockAddr.String()) { + matchedAddrs = append(matchedAddrs, addr) + } else { + excludedAddrs = append(excludedAddrs, addr) + } + } + + return matchedAddrs, excludedAddrs, nil +} + +// IfByName returns a list of matched and non-matched IfAddrs, or an error if +// the regexp fails to compile. +func IfByName(inputRe string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { + re, err := regexp.Compile(inputRe) + if err != nil { + return nil, nil, fmt.Errorf("Unable to compile name regexp %+q: %v", inputRe, err) + } + + matchedAddrs := make(IfAddrs, 0, len(ifAddrs)) + excludedAddrs := make(IfAddrs, 0, len(ifAddrs)) + for _, addr := range ifAddrs { + if re.MatchString(addr.Name) { + matchedAddrs = append(matchedAddrs, addr) + } else { + excludedAddrs = append(excludedAddrs, addr) + } + } + + return matchedAddrs, excludedAddrs, nil +} + +// IfByPort returns a list of matched and non-matched IfAddrs, or an error if +// the regexp fails to compile. +func IfByPort(inputRe string, ifAddrs IfAddrs) (matchedIfs, excludedIfs IfAddrs, err error) { + re, err := regexp.Compile(inputRe) + if err != nil { + return nil, nil, fmt.Errorf("Unable to compile port regexp %+q: %v", inputRe, err) + } + + ipIfs, nonIfs := FilterIfByType(ifAddrs, TypeIP) + matchedIfs = make(IfAddrs, 0, len(ipIfs)) + excludedIfs = append(IfAddrs(nil), nonIfs...) + for _, addr := range ipIfs { + ipAddr := ToIPAddr(addr.SockAddr) + if ipAddr == nil { + continue + } + + port := strconv.FormatInt(int64((*ipAddr).IPPort()), 10) + if re.MatchString(port) { + matchedIfs = append(matchedIfs, addr) + } else { + excludedIfs = append(excludedIfs, addr) + } + } + + return matchedIfs, excludedIfs, nil +} + +// IfByRFC returns a list of matched and non-matched IfAddrs that contain the +// relevant RFC-specified traits. 
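+//
+// For example, a hypothetical call that splits a set of addresses into
+// RFC 6890 (private) and non-RFC 6890 (public) groups:
+//
+//	private, public, err := IfByRFC("6890", ifAddrs)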
+func IfByRFC(selectorParam string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { + inputRFC, err := strconv.ParseUint(selectorParam, 10, 64) + if err != nil { + return IfAddrs{}, IfAddrs{}, fmt.Errorf("unable to parse RFC number %q: %v", selectorParam, err) + } + + matchedIfAddrs := make(IfAddrs, 0, len(ifAddrs)) + remainingIfAddrs := make(IfAddrs, 0, len(ifAddrs)) + + rfcNetMap := KnownRFCs() + rfcNets, ok := rfcNetMap[uint(inputRFC)] + if !ok { + return nil, nil, fmt.Errorf("unsupported RFC %d", inputRFC) + } + + for _, ifAddr := range ifAddrs { + var contained bool + for _, rfcNet := range rfcNets { + if rfcNet.Contains(ifAddr.SockAddr) { + matchedIfAddrs = append(matchedIfAddrs, ifAddr) + contained = true + break + } + } + if !contained { + remainingIfAddrs = append(remainingIfAddrs, ifAddr) + } + } + + return matchedIfAddrs, remainingIfAddrs, nil +} + +// IfByRFCs returns a list of matched and non-matched IfAddrs that contain the +// relevant RFC-specified traits. Multiple RFCs can be specified and separated +// by the `|` symbol. No protection is taken to ensure an IfAddr does not end +// up in both the included and excluded list. +func IfByRFCs(selectorParam string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { + var includedIfs, excludedIfs IfAddrs + for _, rfcStr := range strings.Split(selectorParam, "|") { + includedRFCIfs, excludedRFCIfs, err := IfByRFC(rfcStr, ifAddrs) + if err != nil { + return IfAddrs{}, IfAddrs{}, fmt.Errorf("unable to lookup RFC number %q: %v", rfcStr, err) + } + includedIfs = append(includedIfs, includedRFCIfs...) + excludedIfs = append(excludedIfs, excludedRFCIfs...) + } + + return includedIfs, excludedIfs, nil +} + +// IfByMaskSize returns a list of matched and non-matched IfAddrs that have the +// matching mask size. +func IfByMaskSize(selectorParam string, ifAddrs IfAddrs) (matchedIfs, excludedIfs IfAddrs, err error) { + maskSize, err := strconv.ParseUint(selectorParam, 10, 64) + if err != nil { + return IfAddrs{}, IfAddrs{}, fmt.Errorf("invalid exclude size argument (%q): %v", selectorParam, err) + } + + ipIfs, nonIfs := FilterIfByType(ifAddrs, TypeIP) + matchedIfs = make(IfAddrs, 0, len(ipIfs)) + excludedIfs = append(IfAddrs(nil), nonIfs...) + for _, addr := range ipIfs { + ipAddr := ToIPAddr(addr.SockAddr) + if ipAddr == nil { + return IfAddrs{}, IfAddrs{}, fmt.Errorf("unable to filter mask sizes on non-IP type %s: %v", addr.SockAddr.Type().String(), addr.SockAddr.String()) + } + + switch { + case (*ipAddr).Type()&TypeIPv4 != 0 && maskSize > 32: + return IfAddrs{}, IfAddrs{}, fmt.Errorf("mask size out of bounds for IPv4 address: %d", maskSize) + case (*ipAddr).Type()&TypeIPv6 != 0 && maskSize > 128: + return IfAddrs{}, IfAddrs{}, fmt.Errorf("mask size out of bounds for IPv6 address: %d", maskSize) + } + + if (*ipAddr).Maskbits() == int(maskSize) { + matchedIfs = append(matchedIfs, addr) + } else { + excludedIfs = append(excludedIfs, addr) + } + } + + return matchedIfs, excludedIfs, nil +} + +// IfByType returns a list of matching and non-matching IfAddr that match the +// specified type. For instance: +// +// include "type" "IPv4,IPv6" +// +// will include any IfAddrs that is either an IPv4 or IPv6 address. Any +// addresses on those interfaces that don't match will be included in the +// remainder results. 
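+//
+// A hypothetical direct call that keeps only IPv4 addresses:
+//
+//	matched, remainder, err := IfByType("ipv4", ifAddrs)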
+func IfByType(inputTypes string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { + matchingIfAddrs := make(IfAddrs, 0, len(ifAddrs)) + remainingIfAddrs := make(IfAddrs, 0, len(ifAddrs)) + + ifTypes := strings.Split(strings.ToLower(inputTypes), "|") + for _, ifType := range ifTypes { + switch ifType { + case "ip", "ipv4", "ipv6", "unix": + // Valid types + default: + return nil, nil, fmt.Errorf("unsupported type %q %q", ifType, inputTypes) + } + } + + for _, ifAddr := range ifAddrs { + for _, ifType := range ifTypes { + var matched bool + switch { + case ifType == "ip" && ifAddr.SockAddr.Type()&TypeIP != 0: + matched = true + case ifType == "ipv4" && ifAddr.SockAddr.Type()&TypeIPv4 != 0: + matched = true + case ifType == "ipv6" && ifAddr.SockAddr.Type()&TypeIPv6 != 0: + matched = true + case ifType == "unix" && ifAddr.SockAddr.Type()&TypeUnix != 0: + matched = true + } + + if matched { + matchingIfAddrs = append(matchingIfAddrs, ifAddr) + } else { + remainingIfAddrs = append(remainingIfAddrs, ifAddr) + } + } + } + + return matchingIfAddrs, remainingIfAddrs, nil +} + +// IfByFlag returns a list of matching and non-matching IfAddrs that match the +// specified type. For instance: +// +// include "flag" "up,broadcast" +// +// will include any IfAddrs that have both the "up" and "broadcast" flags set. +// Any addresses on those interfaces that don't match will be omitted from the +// results. +func IfByFlag(inputFlags string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { + matchedAddrs := make(IfAddrs, 0, len(ifAddrs)) + excludedAddrs := make(IfAddrs, 0, len(ifAddrs)) + + var wantForwardable, + wantGlobalUnicast, + wantInterfaceLocalMulticast, + wantLinkLocalMulticast, + wantLinkLocalUnicast, + wantLoopback, + wantMulticast, + wantUnspecified bool + var ifFlags net.Flags + var checkFlags, checkAttrs bool + for _, flagName := range strings.Split(strings.ToLower(inputFlags), "|") { + switch flagName { + case "broadcast": + checkFlags = true + ifFlags = ifFlags | net.FlagBroadcast + case "down": + checkFlags = true + ifFlags = (ifFlags &^ net.FlagUp) + case "forwardable": + checkAttrs = true + wantForwardable = true + case "global unicast": + checkAttrs = true + wantGlobalUnicast = true + case "interface-local multicast": + checkAttrs = true + wantInterfaceLocalMulticast = true + case "link-local multicast": + checkAttrs = true + wantLinkLocalMulticast = true + case "link-local unicast": + checkAttrs = true + wantLinkLocalUnicast = true + case "loopback": + checkAttrs = true + checkFlags = true + ifFlags = ifFlags | net.FlagLoopback + wantLoopback = true + case "multicast": + checkAttrs = true + checkFlags = true + ifFlags = ifFlags | net.FlagMulticast + wantMulticast = true + case "point-to-point": + checkFlags = true + ifFlags = ifFlags | net.FlagPointToPoint + case "unspecified": + checkAttrs = true + wantUnspecified = true + case "up": + checkFlags = true + ifFlags = ifFlags | net.FlagUp + default: + return nil, nil, fmt.Errorf("Unknown interface flag: %+q", flagName) + } + } + + for _, ifAddr := range ifAddrs { + var matched bool + if checkFlags && ifAddr.Interface.Flags&ifFlags == ifFlags { + matched = true + } + if checkAttrs { + if ip := ToIPAddr(ifAddr.SockAddr); ip != nil { + netIP := (*ip).NetIP() + switch { + case wantGlobalUnicast && netIP.IsGlobalUnicast(): + matched = true + case wantInterfaceLocalMulticast && netIP.IsInterfaceLocalMulticast(): + matched = true + case wantLinkLocalMulticast && netIP.IsLinkLocalMulticast(): + matched = true + case 
wantLinkLocalUnicast && netIP.IsLinkLocalUnicast(): + matched = true + case wantLoopback && netIP.IsLoopback(): + matched = true + case wantMulticast && netIP.IsMulticast(): + matched = true + case wantUnspecified && netIP.IsUnspecified(): + matched = true + case wantForwardable && !IsRFC(ForwardingBlacklist, ifAddr.SockAddr): + matched = true + } + } + } + if matched { + matchedAddrs = append(matchedAddrs, ifAddr) + } else { + excludedAddrs = append(excludedAddrs, ifAddr) + } + } + return matchedAddrs, excludedAddrs, nil +} + +// IfByNetwork returns an IfAddrs that are equal to or included within the +// network passed in by selector. +func IfByNetwork(selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, IfAddrs, error) { + var includedIfs, excludedIfs IfAddrs + for _, netStr := range strings.Split(selectorParam, "|") { + netAddr, err := NewIPAddr(netStr) + if err != nil { + return nil, nil, fmt.Errorf("unable to create an IP address from %+q: %v", netStr, err) + } + + for _, ifAddr := range inputIfAddrs { + if netAddr.Contains(ifAddr.SockAddr) { + includedIfs = append(includedIfs, ifAddr) + } else { + excludedIfs = append(excludedIfs, ifAddr) + } + } + } + + return includedIfs, excludedIfs, nil +} + +// IfAddrMath will return a new IfAddr struct with a mutated value. +func IfAddrMath(operation, value string, inputIfAddr IfAddr) (IfAddr, error) { + // Regexp used to enforce the sign being a required part of the grammar for + // some values. + signRe := signRE.Copy() + + switch strings.ToLower(operation) { + case "address": + // "address" operates on the IP address and is allowed to overflow or + // underflow networks, however it will wrap along the underlying address's + // underlying type. + + if !signRe.MatchString(value) { + return IfAddr{}, fmt.Errorf("sign (+/-) is required for operation %q", operation) + } + + switch sockType := inputIfAddr.SockAddr.Type(); sockType { + case TypeIPv4: + // 33 == Accept any uint32 value + // TODO(seanc@): Add the ability to parse hex + i, err := strconv.ParseInt(value, 10, 33) + if err != nil { + return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err) + } + + ipv4 := *ToIPv4Addr(inputIfAddr.SockAddr) + ipv4Uint32 := uint32(ipv4.Address) + ipv4Uint32 += uint32(i) + return IfAddr{ + SockAddr: IPv4Addr{ + Address: IPv4Address(ipv4Uint32), + Mask: ipv4.Mask, + }, + Interface: inputIfAddr.Interface, + }, nil + case TypeIPv6: + // 64 == Accept any int32 value + // TODO(seanc@): Add the ability to parse hex. Also parse a bignum int. + i, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err) + } + + ipv6 := *ToIPv6Addr(inputIfAddr.SockAddr) + ipv6BigIntA := new(big.Int) + ipv6BigIntA.Set(ipv6.Address) + ipv6BigIntB := big.NewInt(i) + + ipv6Addr := ipv6BigIntA.Add(ipv6BigIntA, ipv6BigIntB) + ipv6Addr.And(ipv6Addr, ipv6HostMask) + + return IfAddr{ + SockAddr: IPv6Addr{ + Address: IPv6Address(ipv6Addr), + Mask: ipv6.Mask, + }, + Interface: inputIfAddr.Interface, + }, nil + default: + return IfAddr{}, fmt.Errorf("unsupported type for operation %q: %T", operation, sockType) + } + case "network": + // "network" operates on the network address. Positive values start at the + // network address and negative values wrap at the network address, which + // means a "-1" value on a network will be the broadcast address after + // wrapping is applied. 
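+		// For example (an illustrative sketch): applying operation "network"
+		// with value "-1" to 192.168.1.10/24 yields 192.168.1.255, the
+		// broadcast address of that network.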
+ + if !signRe.MatchString(value) { + return IfAddr{}, fmt.Errorf("sign (+/-) is required for operation %q", operation) + } + + switch sockType := inputIfAddr.SockAddr.Type(); sockType { + case TypeIPv4: + // 33 == Accept any uint32 value + // TODO(seanc@): Add the ability to parse hex + i, err := strconv.ParseInt(value, 10, 33) + if err != nil { + return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err) + } + + ipv4 := *ToIPv4Addr(inputIfAddr.SockAddr) + ipv4Uint32 := uint32(ipv4.NetworkAddress()) + + // Wrap along network mask boundaries. EZ-mode wrapping made possible by + // use of int64 vs a uint. + var wrappedMask int64 + if i >= 0 { + wrappedMask = i + } else { + wrappedMask = 1 + i + int64(^uint32(ipv4.Mask)) + } + + ipv4Uint32 = ipv4Uint32 + (uint32(wrappedMask) &^ uint32(ipv4.Mask)) + + return IfAddr{ + SockAddr: IPv4Addr{ + Address: IPv4Address(ipv4Uint32), + Mask: ipv4.Mask, + }, + Interface: inputIfAddr.Interface, + }, nil + case TypeIPv6: + // 64 == Accept any int32 value + // TODO(seanc@): Add the ability to parse hex. Also parse a bignum int. + i, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err) + } + + ipv6 := *ToIPv6Addr(inputIfAddr.SockAddr) + ipv6BigInt := new(big.Int) + ipv6BigInt.Set(ipv6.NetworkAddress()) + + mask := new(big.Int) + mask.Set(ipv6.Mask) + if i > 0 { + wrappedMask := new(big.Int) + wrappedMask.SetInt64(i) + + wrappedMask.AndNot(wrappedMask, mask) + ipv6BigInt.Add(ipv6BigInt, wrappedMask) + } else { + // Mask off any bits that exceed the network size. Subtract the + // wrappedMask from the last usable - 1 + wrappedMask := new(big.Int) + wrappedMask.SetInt64(-1 * i) + wrappedMask.Sub(wrappedMask, big.NewInt(1)) + + wrappedMask.AndNot(wrappedMask, mask) + + lastUsable := new(big.Int) + lastUsable.Set(ipv6.LastUsable().(IPv6Addr).Address) + + ipv6BigInt = lastUsable.Sub(lastUsable, wrappedMask) + } + + return IfAddr{ + SockAddr: IPv6Addr{ + Address: IPv6Address(ipv6BigInt), + Mask: ipv6.Mask, + }, + Interface: inputIfAddr.Interface, + }, nil + default: + return IfAddr{}, fmt.Errorf("unsupported type for operation %q: %T", operation, sockType) + } + case "mask": + // "mask" operates on the IP address and returns the IP address on + // which the given integer mask has been applied. If the applied mask + // corresponds to a larger network than the mask of the IP address, + // the latter will be replaced by the former. 
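+		// For example (an illustrative sketch): applying operation "mask"
+		// with value "16" to 192.168.1.10/24 yields 192.168.0.0/16.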
+		switch sockType := inputIfAddr.SockAddr.Type(); sockType {
+		case TypeIPv4:
+			i, err := strconv.ParseUint(value, 10, 32)
+			if err != nil {
+				return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err)
+			}
+
+			if i > 32 {
+				return IfAddr{}, fmt.Errorf("parameter for operation %q on ipv4 addresses must be between 0 and 32", operation)
+			}
+
+			ipv4 := *ToIPv4Addr(inputIfAddr.SockAddr)
+
+			ipv4Mask := net.CIDRMask(int(i), 32)
+			ipv4MaskUint32 := binary.BigEndian.Uint32(ipv4Mask)
+
+			maskedIpv4 := ipv4.NetIP().Mask(ipv4Mask)
+			maskedIpv4Uint32 := binary.BigEndian.Uint32(maskedIpv4)
+
+			maskedIpv4MaskUint32 := uint32(ipv4.Mask)
+
+			if ipv4MaskUint32 < maskedIpv4MaskUint32 {
+				maskedIpv4MaskUint32 = ipv4MaskUint32
+			}
+
+			return IfAddr{
+				SockAddr: IPv4Addr{
+					Address: IPv4Address(maskedIpv4Uint32),
+					Mask:    IPv4Mask(maskedIpv4MaskUint32),
+				},
+				Interface: inputIfAddr.Interface,
+			}, nil
+		case TypeIPv6:
+			i, err := strconv.ParseUint(value, 10, 32)
+			if err != nil {
+				return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err)
+			}
+
+			if i > 128 {
+				return IfAddr{}, fmt.Errorf("parameter for operation %q on ipv6 addresses must be between 0 and 128", operation)
+			}
+
+			ipv6 := *ToIPv6Addr(inputIfAddr.SockAddr)
+
+			ipv6Mask := net.CIDRMask(int(i), 128)
+			ipv6MaskBigInt := new(big.Int)
+			ipv6MaskBigInt.SetBytes(ipv6Mask)
+
+			maskedIpv6 := ipv6.NetIP().Mask(ipv6Mask)
+			maskedIpv6BigInt := new(big.Int)
+			maskedIpv6BigInt.SetBytes(maskedIpv6)
+
+			maskedIpv6MaskBigInt := new(big.Int)
+			maskedIpv6MaskBigInt.Set(ipv6.Mask)
+
+			if ipv6MaskBigInt.Cmp(maskedIpv6MaskBigInt) == -1 {
+				maskedIpv6MaskBigInt = ipv6MaskBigInt
+			}
+
+			return IfAddr{
+				SockAddr: IPv6Addr{
+					Address: IPv6Address(maskedIpv6BigInt),
+					Mask:    IPv6Mask(maskedIpv6MaskBigInt),
+				},
+				Interface: inputIfAddr.Interface,
+			}, nil
+		default:
+			return IfAddr{}, fmt.Errorf("unsupported type for operation %q: %T", operation, sockType)
+		}
+	default:
+		return IfAddr{}, fmt.Errorf("unsupported math operation: %q", operation)
+	}
+}
+
+// IfAddrsMath will apply an IfAddrMath operation to each IfAddr struct. Any
+// failure will result in zero results.
+func IfAddrsMath(operation, value string, inputIfAddrs IfAddrs) (IfAddrs, error) {
+	outputAddrs := make(IfAddrs, 0, len(inputIfAddrs))
+	for _, ifAddr := range inputIfAddrs {
+		result, err := IfAddrMath(operation, value, ifAddr)
+		if err != nil {
+			return IfAddrs{}, fmt.Errorf("unable to perform an IPMath operation on %s: %v", ifAddr, err)
+		}
+		outputAddrs = append(outputAddrs, result)
+	}
+	return outputAddrs, nil
+}
+
+// IncludeIfs returns an IfAddrs based on the passed in selector.
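+//
+// For example, a hypothetical call that keeps only RFC 6890 addresses:
+//
+//	included, err := IncludeIfs("rfc", "6890", ifAddrs)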
+func IncludeIfs(selectorName, selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, error) { + var includedIfs IfAddrs + var err error + + switch strings.ToLower(selectorName) { + case "address": + includedIfs, _, err = IfByAddress(selectorParam, inputIfAddrs) + case "flag", "flags": + includedIfs, _, err = IfByFlag(selectorParam, inputIfAddrs) + case "name": + includedIfs, _, err = IfByName(selectorParam, inputIfAddrs) + case "network": + includedIfs, _, err = IfByNetwork(selectorParam, inputIfAddrs) + case "port": + includedIfs, _, err = IfByPort(selectorParam, inputIfAddrs) + case "rfc", "rfcs": + includedIfs, _, err = IfByRFCs(selectorParam, inputIfAddrs) + case "size": + includedIfs, _, err = IfByMaskSize(selectorParam, inputIfAddrs) + case "type": + includedIfs, _, err = IfByType(selectorParam, inputIfAddrs) + default: + return IfAddrs{}, fmt.Errorf("invalid include selector %q", selectorName) + } + + if err != nil { + return IfAddrs{}, err + } + + return includedIfs, nil +} + +// ExcludeIfs returns an IfAddrs based on the passed in selector. +func ExcludeIfs(selectorName, selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, error) { + var excludedIfs IfAddrs + var err error + + switch strings.ToLower(selectorName) { + case "address": + _, excludedIfs, err = IfByAddress(selectorParam, inputIfAddrs) + case "flag", "flags": + _, excludedIfs, err = IfByFlag(selectorParam, inputIfAddrs) + case "name": + _, excludedIfs, err = IfByName(selectorParam, inputIfAddrs) + case "network": + _, excludedIfs, err = IfByNetwork(selectorParam, inputIfAddrs) + case "port": + _, excludedIfs, err = IfByPort(selectorParam, inputIfAddrs) + case "rfc", "rfcs": + _, excludedIfs, err = IfByRFCs(selectorParam, inputIfAddrs) + case "size": + _, excludedIfs, err = IfByMaskSize(selectorParam, inputIfAddrs) + case "type": + _, excludedIfs, err = IfByType(selectorParam, inputIfAddrs) + default: + return IfAddrs{}, fmt.Errorf("invalid exclude selector %q", selectorName) + } + + if err != nil { + return IfAddrs{}, err + } + + return excludedIfs, nil +} + +// SortIfBy returns an IfAddrs sorted based on the passed in selector. Multiple +// sort clauses can be passed in as a comma delimited list without whitespace. +func SortIfBy(selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, error) { + sortedIfs := append(IfAddrs(nil), inputIfAddrs...) + + clauses := strings.Split(selectorParam, ",") + sortFuncs := make([]CmpIfAddrFunc, len(clauses)) + + for i, clause := range clauses { + switch strings.TrimSpace(strings.ToLower(clause)) { + case "+address", "address": + // The "address" selector returns an array of IfAddrs + // ordered by the network address. IfAddrs that are not + // comparable will be at the end of the list and in a + // non-deterministic order. + sortFuncs[i] = AscIfAddress + case "-address": + sortFuncs[i] = DescIfAddress + case "+default", "default": + sortFuncs[i] = AscIfDefault + case "-default": + sortFuncs[i] = DescIfDefault + case "+name", "name": + // The "name" selector returns an array of IfAddrs + // ordered by the interface name. + sortFuncs[i] = AscIfName + case "-name": + sortFuncs[i] = DescIfName + case "+port", "port": + // The "port" selector returns an array of IfAddrs + // ordered by the port, if included in the IfAddr. + // IfAddrs that are not comparable will be at the end of + // the list and in a non-deterministic order. 
+ sortFuncs[i] = AscIfPort + case "-port": + sortFuncs[i] = DescIfPort + case "+private", "private": + // The "private" selector returns an array of IfAddrs + // ordered by private addresses first. IfAddrs that are + // not comparable will be at the end of the list and in + // a non-deterministic order. + sortFuncs[i] = AscIfPrivate + case "-private": + sortFuncs[i] = DescIfPrivate + case "+size", "size": + // The "size" selector returns an array of IfAddrs + // ordered by the size of the network mask, smaller mask + // (larger number of hosts per network) to largest + // (e.g. a /24 sorts before a /32). + sortFuncs[i] = AscIfNetworkSize + case "-size": + sortFuncs[i] = DescIfNetworkSize + case "+type", "type": + // The "type" selector returns an array of IfAddrs + // ordered by the type of the IfAddr. The sort order is + // Unix, IPv4, then IPv6. + sortFuncs[i] = AscIfType + case "-type": + sortFuncs[i] = DescIfType + default: + // Return an empty list for invalid sort types. + return IfAddrs{}, fmt.Errorf("unknown sort type: %q", clause) + } + } + + OrderedIfAddrBy(sortFuncs...).Sort(sortedIfs) + + return sortedIfs, nil +} + +// UniqueIfAddrsBy creates a unique set of IfAddrs based on the matching +// selector. UniqueIfAddrsBy assumes the input has already been sorted. +func UniqueIfAddrsBy(selectorName string, inputIfAddrs IfAddrs) (IfAddrs, error) { + attrName := strings.ToLower(selectorName) + + ifs := make(IfAddrs, 0, len(inputIfAddrs)) + var lastMatch string + for _, ifAddr := range inputIfAddrs { + var out string + switch attrName { + case "address": + out = ifAddr.SockAddr.String() + case "name": + out = ifAddr.Name + default: + return nil, fmt.Errorf("unsupported unique constraint %+q", selectorName) + } + + switch { + case lastMatch == "", lastMatch != out: + lastMatch = out + ifs = append(ifs, ifAddr) + case lastMatch == out: + continue + } + } + + return ifs, nil +} + +// JoinIfAddrs joins an IfAddrs and returns a string +func JoinIfAddrs(selectorName string, joinStr string, inputIfAddrs IfAddrs) (string, error) { + outputs := make([]string, 0, len(inputIfAddrs)) + attrName := AttrName(strings.ToLower(selectorName)) + + for _, ifAddr := range inputIfAddrs { + var attrVal string + var err error + attrVal, err = ifAddr.Attr(attrName) + if err != nil { + return "", err + } + outputs = append(outputs, attrVal) + } + return strings.Join(outputs, joinStr), nil +} + +// LimitIfAddrs returns a slice of IfAddrs based on the specified limit. +func LimitIfAddrs(lim uint, in IfAddrs) (IfAddrs, error) { + // Clamp the limit to the length of the array + if int(lim) > len(in) { + lim = uint(len(in)) + } + + return in[0:lim], nil +} + +// OffsetIfAddrs returns a slice of IfAddrs based on the specified offset. +func OffsetIfAddrs(off int, in IfAddrs) (IfAddrs, error) { + var end bool + if off < 0 { + end = true + off = off * -1 + } + + if off > len(in) { + return IfAddrs{}, fmt.Errorf("unable to seek past the end of the interface array: offset (%d) exceeds the number of interfaces (%d)", off, len(in)) + } + + if end { + return in[len(in)-off:], nil + } + return in[off:], nil +} + +func (ifAddr IfAddr) String() string { + return fmt.Sprintf("%s %v", ifAddr.SockAddr, ifAddr.Interface) +} + +// parseDefaultIfNameFromRoute parses standard route(8)'s output for the *BSDs +// and Solaris. 
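+//
+// For example, given route(8) output containing a line such as
+// (illustrative):
+//
+//	interface: em0
+//
+// it returns "em0".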
+func parseDefaultIfNameFromRoute(routeOut string) (string, error) { + lines := strings.Split(routeOut, "\n") + for _, line := range lines { + kvs := strings.SplitN(line, ":", 2) + if len(kvs) != 2 { + continue + } + + if strings.TrimSpace(kvs[0]) == "interface" { + ifName := strings.TrimSpace(kvs[1]) + return ifName, nil + } + } + + return "", errors.New("No default interface found") +} + +// parseDefaultIfNameFromIPCmd parses the default interface from ip(8) for +// Linux. +func parseDefaultIfNameFromIPCmd(routeOut string) (string, error) { + parsedLines := parseIfNameFromIPCmd(routeOut) + for _, parsedLine := range parsedLines { + if parsedLine[0] == "default" && + parsedLine[1] == "via" && + parsedLine[3] == "dev" { + ifName := strings.TrimSpace(parsedLine[4]) + return ifName, nil + } + } + + return "", errors.New("No default interface found") +} + +// parseDefaultIfNameFromIPCmdAndroid parses the default interface from ip(8) for +// Android. +func parseDefaultIfNameFromIPCmdAndroid(routeOut string) (string, error) { + parsedLines := parseIfNameFromIPCmd(routeOut) + if (len(parsedLines) > 0) { + ifName := strings.TrimSpace(parsedLines[0][4]) + return ifName, nil + } + + return "", errors.New("No default interface found") +} + + +// parseIfNameFromIPCmd parses interfaces from ip(8) for +// Linux. +func parseIfNameFromIPCmd(routeOut string) [][]string { + lines := strings.Split(routeOut, "\n") + re := whitespaceRE.Copy() + parsedLines := make([][]string, 0, len(lines)) + for _, line := range lines { + kvs := re.Split(line, -1) + if len(kvs) < 5 { + continue + } + parsedLines = append(parsedLines, kvs) + } + return parsedLines +} + +// parseDefaultIfNameWindows parses the default interface from `netstat -rn` and +// `ipconfig` on Windows. +func parseDefaultIfNameWindows(routeOut, ipconfigOut string) (string, error) { + defaultIPAddr, err := parseDefaultIPAddrWindowsRoute(routeOut) + if err != nil { + return "", err + } + + ifName, err := parseDefaultIfNameWindowsIPConfig(defaultIPAddr, ipconfigOut) + if err != nil { + return "", err + } + + return ifName, nil +} + +// parseDefaultIPAddrWindowsRoute parses the IP address on the default interface +// `netstat -rn`. +// +// NOTES(sean): Only IPv4 addresses are parsed at this time. If you have an +// IPv6 connected host, submit an issue on github.com/hashicorp/go-sockaddr with +// the output from `netstat -rn`, `ipconfig`, and version of Windows to see IPv6 +// support added. +func parseDefaultIPAddrWindowsRoute(routeOut string) (string, error) { + lines := strings.Split(routeOut, "\n") + re := whitespaceRE.Copy() + for _, line := range lines { + kvs := re.Split(strings.TrimSpace(line), -1) + if len(kvs) < 3 { + continue + } + + if kvs[0] == "0.0.0.0" && kvs[1] == "0.0.0.0" { + defaultIPAddr := strings.TrimSpace(kvs[3]) + return defaultIPAddr, nil + } + } + + return "", errors.New("No IP on default interface found") +} + +// parseDefaultIfNameWindowsIPConfig parses the output of `ipconfig` to find the +// interface name forwarding traffic to the default gateway. 
+func parseDefaultIfNameWindowsIPConfig(defaultIPAddr, routeOut string) (string, error) { + lines := strings.Split(routeOut, "\n") + ifNameRe := ifNameRE.Copy() + ipAddrRe := ipAddrRE.Copy() + var ifName string + for _, line := range lines { + switch ifNameMatches := ifNameRe.FindStringSubmatch(line); { + case len(ifNameMatches) > 1: + ifName = ifNameMatches[1] + continue + } + + switch ipAddrMatches := ipAddrRe.FindStringSubmatch(line); { + case len(ipAddrMatches) > 1 && ipAddrMatches[1] == defaultIPAddr: + return ifName, nil + } + } + + return "", errors.New("No default interface found with matching IP") +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifattr.go b/vendor/github.com/hashicorp/go-sockaddr/ifattr.go new file mode 100644 index 00000000000..6984cb4a354 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ifattr.go @@ -0,0 +1,65 @@ +package sockaddr + +import ( + "fmt" + "net" +) + +// IfAddr is a union of a SockAddr and a net.Interface. +type IfAddr struct { + SockAddr + net.Interface +} + +// Attr returns the named attribute as a string +func (ifAddr IfAddr) Attr(attrName AttrName) (string, error) { + val := IfAddrAttr(ifAddr, attrName) + if val != "" { + return val, nil + } + + return Attr(ifAddr.SockAddr, attrName) +} + +// Attr returns the named attribute as a string +func Attr(sa SockAddr, attrName AttrName) (string, error) { + switch sockType := sa.Type(); { + case sockType&TypeIP != 0: + ip := *ToIPAddr(sa) + attrVal := IPAddrAttr(ip, attrName) + if attrVal != "" { + return attrVal, nil + } + + if sockType == TypeIPv4 { + ipv4 := *ToIPv4Addr(sa) + attrVal := IPv4AddrAttr(ipv4, attrName) + if attrVal != "" { + return attrVal, nil + } + } else if sockType == TypeIPv6 { + ipv6 := *ToIPv6Addr(sa) + attrVal := IPv6AddrAttr(ipv6, attrName) + if attrVal != "" { + return attrVal, nil + } + } + + case sockType == TypeUnix: + us := *ToUnixSock(sa) + attrVal := UnixSockAttr(us, attrName) + if attrVal != "" { + return attrVal, nil + } + } + + // Non type-specific attributes + switch attrName { + case "string": + return sa.String(), nil + case "type": + return sa.Type().String(), nil + } + + return "", fmt.Errorf("unsupported attribute name %q", attrName) +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipaddr.go b/vendor/github.com/hashicorp/go-sockaddr/ipaddr.go new file mode 100644 index 00000000000..b47d15c2016 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ipaddr.go @@ -0,0 +1,169 @@ +package sockaddr + +import ( + "fmt" + "math/big" + "net" + "strings" +) + +// Constants for the sizes of IPv3, IPv4, and IPv6 address types. +const ( + IPv3len = 6 + IPv4len = 4 + IPv6len = 16 +) + +// IPAddr is a generic IP address interface for IPv4 and IPv6 addresses, +// networks, and socket endpoints. +type IPAddr interface { + SockAddr + AddressBinString() string + AddressHexString() string + Cmp(SockAddr) int + CmpAddress(SockAddr) int + CmpPort(SockAddr) int + FirstUsable() IPAddr + Host() IPAddr + IPPort() IPPort + LastUsable() IPAddr + Maskbits() int + NetIP() *net.IP + NetIPMask() *net.IPMask + NetIPNet() *net.IPNet + Network() IPAddr + Octets() []int +} + +// IPPort is the type for an IP port number for the TCP and UDP IP transports. +type IPPort uint16 + +// IPPrefixLen is a typed integer representing the prefix length for a given +// IPAddr. +type IPPrefixLen byte + +// ipAddrAttrMap is a map of the IPAddr type-specific attributes. 
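+//
+// A hypothetical lookup through IPAddrAttr (defined below), which consults
+// this map:
+//
+//	IPAddrAttr(MustIPAddr("192.168.1.10/24"), "netmask") // "255.255.255.0"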
+var ipAddrAttrMap map[AttrName]func(IPAddr) string
+var ipAddrAttrs []AttrName
+
+func init() {
+	ipAddrInit()
+}
+
+// NewIPAddr creates a new IPAddr from a string. Returns nil if the string is
+// not an IPv4 or an IPv6 address.
+func NewIPAddr(addr string) (IPAddr, error) {
+	ipv4Addr, err := NewIPv4Addr(addr)
+	if err == nil {
+		return ipv4Addr, nil
+	}
+
+	ipv6Addr, err := NewIPv6Addr(addr)
+	if err == nil {
+		return ipv6Addr, nil
+	}
+
+	return nil, fmt.Errorf("invalid IPAddr %v", addr)
+}
+
+// IPAddrAttr returns a string representation of an attribute for the given
+// IPAddr.
+func IPAddrAttr(ip IPAddr, selector AttrName) string {
+	fn, found := ipAddrAttrMap[selector]
+	if !found {
+		return ""
+	}
+
+	return fn(ip)
+}
+
+// IPAttrs returns a list of attributes supported by the IPAddr type.
+func IPAttrs() []AttrName {
+	return ipAddrAttrs
+}
+
+// MustIPAddr is a helper function that must return an IPAddr or panic on
+// invalid input.
+func MustIPAddr(addr string) IPAddr {
+	ip, err := NewIPAddr(addr)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to create an IPAddr from %+q: %v", addr, err))
+	}
+	return ip
+}
+
+// ipAddrInit is called once at init()
+func ipAddrInit() {
+	// Sorted for human readability
+	ipAddrAttrs = []AttrName{
+		"host",
+		"address",
+		"port",
+		"netmask",
+		"network",
+		"mask_bits",
+		"binary",
+		"hex",
+		"first_usable",
+		"last_usable",
+		"octets",
+	}
+
+	ipAddrAttrMap = map[AttrName]func(ip IPAddr) string{
+		"address": func(ip IPAddr) string {
+			return ip.NetIP().String()
+		},
+		"binary": func(ip IPAddr) string {
+			return ip.AddressBinString()
+		},
+		"first_usable": func(ip IPAddr) string {
+			return ip.FirstUsable().String()
+		},
+		"hex": func(ip IPAddr) string {
+			return ip.AddressHexString()
+		},
+		"host": func(ip IPAddr) string {
+			return ip.Host().String()
+		},
+		"last_usable": func(ip IPAddr) string {
+			return ip.LastUsable().String()
+		},
+		"mask_bits": func(ip IPAddr) string {
+			return fmt.Sprintf("%d", ip.Maskbits())
+		},
+		"netmask": func(ip IPAddr) string {
+			switch v := ip.(type) {
+			case IPv4Addr:
+				ipv4Mask := IPv4Addr{
+					Address: IPv4Address(v.Mask),
+					Mask:    IPv4HostMask,
+				}
+				return ipv4Mask.String()
+			case IPv6Addr:
+				ipv6Mask := new(big.Int)
+				ipv6Mask.Set(v.Mask)
+				ipv6MaskAddr := IPv6Addr{
+					Address: IPv6Address(ipv6Mask),
+					Mask:    ipv6HostMask,
+				}
+				return ipv6MaskAddr.String()
+			default:
+				return fmt.Sprintf("<unsupported type: %T>", ip)
+			}
+		},
+		"network": func(ip IPAddr) string {
+			return ip.Network().NetIP().String()
+		},
+		"octets": func(ip IPAddr) string {
+			octets := ip.Octets()
+			octetStrs := make([]string, 0, len(octets))
+			for _, octet := range octets {
+				octetStrs = append(octetStrs, fmt.Sprintf("%d", octet))
+			}
+			return strings.Join(octetStrs, " ")
+		},
+		"port": func(ip IPAddr) string {
+			return fmt.Sprintf("%d", ip.IPPort())
+		},
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go b/vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go
new file mode 100644
index 00000000000..6eeb7ddd2f1
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go
@@ -0,0 +1,98 @@
+package sockaddr
+
+import "bytes"
+
+type IPAddrs []IPAddr
+
+func (s IPAddrs) Len() int      { return len(s) }
+func (s IPAddrs) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// // SortIPAddrsByCmp is a type that satisfies sort.Interface and can be used
+// // by the routines in this package.
The SortIPAddrsByCmp type is used to +// // sort IPAddrs by Cmp() +// type SortIPAddrsByCmp struct{ IPAddrs } + +// // Less reports whether the element with index i should sort before the +// // element with index j. +// func (s SortIPAddrsByCmp) Less(i, j int) bool { +// // Sort by Type, then address, then port number. +// return Less(s.IPAddrs[i], s.IPAddrs[j]) +// } + +// SortIPAddrsBySpecificMaskLen is a type that satisfies sort.Interface and +// can be used by the routines in this package. The +// SortIPAddrsBySpecificMaskLen type is used to sort IPAddrs by smallest +// network (most specific to largest network). +type SortIPAddrsByNetworkSize struct{ IPAddrs } + +// Less reports whether the element with index i should sort before the +// element with index j. +func (s SortIPAddrsByNetworkSize) Less(i, j int) bool { + // Sort masks with a larger binary value (i.e. fewer hosts per network + // prefix) after masks with a smaller value (larger number of hosts per + // prefix). + switch bytes.Compare([]byte(*s.IPAddrs[i].NetIPMask()), []byte(*s.IPAddrs[j].NetIPMask())) { + case 0: + // Fall through to the second test if the net.IPMasks are the + // same. + break + case 1: + return true + case -1: + return false + default: + panic("bad, m'kay?") + } + + // Sort IPs based on the length (i.e. prefer IPv4 over IPv6). + iLen := len(*s.IPAddrs[i].NetIP()) + jLen := len(*s.IPAddrs[j].NetIP()) + if iLen != jLen { + return iLen > jLen + } + + // Sort IPs based on their network address from lowest to highest. + switch bytes.Compare(s.IPAddrs[i].NetIPNet().IP, s.IPAddrs[j].NetIPNet().IP) { + case 0: + break + case 1: + return false + case -1: + return true + default: + panic("lol wut?") + } + + // If a host does not have a port set, it always sorts after hosts + // that have a port (e.g. a host with a /32 and port number is more + // specific and should sort first over a host with a /32 but no port + // set). + if s.IPAddrs[i].IPPort() == 0 || s.IPAddrs[j].IPPort() == 0 { + return false + } + return s.IPAddrs[i].IPPort() < s.IPAddrs[j].IPPort() +} + +// SortIPAddrsBySpecificMaskLen is a type that satisfies sort.Interface and +// can be used by the routines in this package. The +// SortIPAddrsBySpecificMaskLen type is used to sort IPAddrs by smallest +// network (most specific to largest network). +type SortIPAddrsBySpecificMaskLen struct{ IPAddrs } + +// Less reports whether the element with index i should sort before the +// element with index j. +func (s SortIPAddrsBySpecificMaskLen) Less(i, j int) bool { + return s.IPAddrs[i].Maskbits() > s.IPAddrs[j].Maskbits() +} + +// SortIPAddrsByBroadMaskLen is a type that satisfies sort.Interface and can +// be used by the routines in this package. The SortIPAddrsByBroadMaskLen +// type is used to sort IPAddrs by largest network (i.e. largest subnets +// first). +type SortIPAddrsByBroadMaskLen struct{ IPAddrs } + +// Less reports whether the element with index i should sort before the +// element with index j. 
+func (s SortIPAddrsByBroadMaskLen) Less(i, j int) bool { + return s.IPAddrs[i].Maskbits() < s.IPAddrs[j].Maskbits() +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go b/vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go new file mode 100644 index 00000000000..4d395dc954b --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go @@ -0,0 +1,516 @@ +package sockaddr + +import ( + "encoding/binary" + "fmt" + "net" + "regexp" + "strconv" + "strings" +) + +type ( + // IPv4Address is a named type representing an IPv4 address. + IPv4Address uint32 + + // IPv4Network is a named type representing an IPv4 network. + IPv4Network uint32 + + // IPv4Mask is a named type representing an IPv4 network mask. + IPv4Mask uint32 +) + +// IPv4HostMask is a constant represents a /32 IPv4 Address +// (i.e. 255.255.255.255). +const IPv4HostMask = IPv4Mask(0xffffffff) + +// ipv4AddrAttrMap is a map of the IPv4Addr type-specific attributes. +var ipv4AddrAttrMap map[AttrName]func(IPv4Addr) string +var ipv4AddrAttrs []AttrName +var trailingHexNetmaskRE *regexp.Regexp + +// IPv4Addr implements a convenience wrapper around the union of Go's +// built-in net.IP and net.IPNet types. In UNIX-speak, IPv4Addr implements +// `sockaddr` when the the address family is set to AF_INET +// (i.e. `sockaddr_in`). +type IPv4Addr struct { + IPAddr + Address IPv4Address + Mask IPv4Mask + Port IPPort +} + +func init() { + ipv4AddrInit() + trailingHexNetmaskRE = regexp.MustCompile(`/([0f]{8})$`) +} + +// NewIPv4Addr creates an IPv4Addr from a string. String can be in the form +// of either an IPv4:port (e.g. `1.2.3.4:80`, in which case the mask is +// assumed to be a `/32`), an IPv4 address (e.g. `1.2.3.4`, also with a `/32` +// mask), or an IPv4 CIDR (e.g. `1.2.3.4/24`, which has its IP port +// initialized to zero). ipv4Str can not be a hostname. +// +// NOTE: Many net.*() routines will initialize and return an IPv6 address. +// To create uint32 values from net.IP, always test to make sure the address +// returned can be converted to a 4 byte array using To4(). +func NewIPv4Addr(ipv4Str string) (IPv4Addr, error) { + // Strip off any bogus hex-encoded netmasks that will be mis-parsed by Go. In + // particular, clients with the Barracuda VPN client will see something like: + // `192.168.3.51/00ffffff` as their IP address. + trailingHexNetmaskRe := trailingHexNetmaskRE.Copy() + if match := trailingHexNetmaskRe.FindStringIndex(ipv4Str); match != nil { + ipv4Str = ipv4Str[:match[0]] + } + + // Parse as an IPv4 CIDR + ipAddr, network, err := net.ParseCIDR(ipv4Str) + if err == nil { + ipv4 := ipAddr.To4() + if ipv4 == nil { + return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address", ipv4Str) + } + + // If we see an IPv6 netmask, convert it to an IPv4 mask. 
+ netmaskSepPos := strings.LastIndexByte(ipv4Str, '/') + if netmaskSepPos != -1 && netmaskSepPos+1 < len(ipv4Str) { + netMask, err := strconv.ParseUint(ipv4Str[netmaskSepPos+1:], 10, 8) + if err != nil { + return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address: unable to parse CIDR netmask: %v", ipv4Str, err) + } else if netMask > 128 { + return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address: invalid CIDR netmask", ipv4Str) + } + + if netMask >= 96 { + // Convert the IPv6 netmask to an IPv4 netmask + network.Mask = net.CIDRMask(int(netMask-96), IPv4len*8) + } + } + ipv4Addr := IPv4Addr{ + Address: IPv4Address(binary.BigEndian.Uint32(ipv4)), + Mask: IPv4Mask(binary.BigEndian.Uint32(network.Mask)), + } + return ipv4Addr, nil + } + + // Attempt to parse ipv4Str as a /32 host with a port number. + tcpAddr, err := net.ResolveTCPAddr("tcp4", ipv4Str) + if err == nil { + ipv4 := tcpAddr.IP.To4() + if ipv4 == nil { + return IPv4Addr{}, fmt.Errorf("Unable to resolve %+q as an IPv4 address", ipv4Str) + } + + ipv4Uint32 := binary.BigEndian.Uint32(ipv4) + ipv4Addr := IPv4Addr{ + Address: IPv4Address(ipv4Uint32), + Mask: IPv4HostMask, + Port: IPPort(tcpAddr.Port), + } + + return ipv4Addr, nil + } + + // Parse as a naked IPv4 address + ip := net.ParseIP(ipv4Str) + if ip != nil { + ipv4 := ip.To4() + if ipv4 == nil { + return IPv4Addr{}, fmt.Errorf("Unable to string convert %+q to an IPv4 address", ipv4Str) + } + + ipv4Uint32 := binary.BigEndian.Uint32(ipv4) + ipv4Addr := IPv4Addr{ + Address: IPv4Address(ipv4Uint32), + Mask: IPv4HostMask, + } + return ipv4Addr, nil + } + + return IPv4Addr{}, fmt.Errorf("Unable to parse %+q to an IPv4 address: %v", ipv4Str, err) +} + +// AddressBinString returns a string with the IPv4Addr's Address represented +// as a sequence of '0' and '1' characters. This method is useful for +// debugging or by operators who want to inspect an address. +func (ipv4 IPv4Addr) AddressBinString() string { + return fmt.Sprintf("%032s", strconv.FormatUint(uint64(ipv4.Address), 2)) +} + +// AddressHexString returns a string with the IPv4Addr address represented as +// a sequence of hex characters. This method is useful for debugging or by +// operators who want to inspect an address. +func (ipv4 IPv4Addr) AddressHexString() string { + return fmt.Sprintf("%08s", strconv.FormatUint(uint64(ipv4.Address), 16)) +} + +// Broadcast is an IPv4Addr-only method that returns the broadcast address of +// the network. +// +// NOTE: IPv6 only supports multicast, so this method only exists for +// IPv4Addr. +func (ipv4 IPv4Addr) Broadcast() IPAddr { + // Nothing should listen on a broadcast address. + return IPv4Addr{ + Address: IPv4Address(ipv4.BroadcastAddress()), + Mask: IPv4HostMask, + } +} + +// BroadcastAddress returns a IPv4Network of the IPv4Addr's broadcast +// address. +func (ipv4 IPv4Addr) BroadcastAddress() IPv4Network { + return IPv4Network(uint32(ipv4.Address)&uint32(ipv4.Mask) | ^uint32(ipv4.Mask)) +} + +// CmpAddress follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because its address is lower than arg +// - 0 if the SockAddr arg is equal to the receiving IPv4Addr or the argument is +// of a different type. +// - 1 If the argument should sort first. 
+func (ipv4 IPv4Addr) CmpAddress(sa SockAddr) int { + ipv4b, ok := sa.(IPv4Addr) + if !ok { + return sortDeferDecision + } + + switch { + case ipv4.Address == ipv4b.Address: + return sortDeferDecision + case ipv4.Address < ipv4b.Address: + return sortReceiverBeforeArg + default: + return sortArgBeforeReceiver + } +} + +// CmpPort follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because its port is lower than arg +// - 0 if the SockAddr arg's port number is equal to the receiving IPv4Addr, +// regardless of type. +// - 1 If the argument should sort first. +func (ipv4 IPv4Addr) CmpPort(sa SockAddr) int { + var saPort IPPort + switch v := sa.(type) { + case IPv4Addr: + saPort = v.Port + case IPv6Addr: + saPort = v.Port + default: + return sortDeferDecision + } + + switch { + case ipv4.Port == saPort: + return sortDeferDecision + case ipv4.Port < saPort: + return sortReceiverBeforeArg + default: + return sortArgBeforeReceiver + } +} + +// CmpRFC follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because it belongs to the RFC and its +// arg does not +// - 0 if the receiver and arg both belong to the same RFC or neither do. +// - 1 If the arg belongs to the RFC but receiver does not. +func (ipv4 IPv4Addr) CmpRFC(rfcNum uint, sa SockAddr) int { + recvInRFC := IsRFC(rfcNum, ipv4) + ipv4b, ok := sa.(IPv4Addr) + if !ok { + // If the receiver is part of the desired RFC and the SockAddr + // argument is not, return -1 so that the receiver sorts before + // the non-IPv4 SockAddr. Conversely, if the receiver is not + // part of the RFC, punt on sorting and leave it for the next + // sorter. + if recvInRFC { + return sortReceiverBeforeArg + } else { + return sortDeferDecision + } + } + + argInRFC := IsRFC(rfcNum, ipv4b) + switch { + case (recvInRFC && argInRFC), (!recvInRFC && !argInRFC): + // If a and b both belong to the RFC, or neither belong to + // rfcNum, defer sorting to the next sorter. + return sortDeferDecision + case recvInRFC && !argInRFC: + return sortReceiverBeforeArg + default: + return sortArgBeforeReceiver + } +} + +// Contains returns true if the SockAddr is contained within the receiver. +func (ipv4 IPv4Addr) Contains(sa SockAddr) bool { + ipv4b, ok := sa.(IPv4Addr) + if !ok { + return false + } + + return ipv4.ContainsNetwork(ipv4b) +} + +// ContainsAddress returns true if the IPv4Address is contained within the +// receiver. +func (ipv4 IPv4Addr) ContainsAddress(x IPv4Address) bool { + return IPv4Address(ipv4.NetworkAddress()) <= x && + IPv4Address(ipv4.BroadcastAddress()) >= x +} + +// ContainsNetwork returns true if the network from IPv4Addr is contained +// within the receiver. +func (ipv4 IPv4Addr) ContainsNetwork(x IPv4Addr) bool { + return ipv4.NetworkAddress() <= x.NetworkAddress() && + ipv4.BroadcastAddress() >= x.BroadcastAddress() +} + +// DialPacketArgs returns the arguments required to be passed to +// net.DialUDP(). If the Mask of ipv4 is not a /32 or the Port is 0, +// DialPacketArgs() will fail. See Host() to create an IPv4Addr with its +// mask set to /32. +func (ipv4 IPv4Addr) DialPacketArgs() (network, dialArgs string) { + if ipv4.Mask != IPv4HostMask || ipv4.Port == 0 { + return "udp4", "" + } + return "udp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) +} + +// DialStreamArgs returns the arguments required to be passed to +// net.DialTCP(). If the Mask of ipv4 is not a /32 or the Port is 0, +// DialStreamArgs() will fail. 
See Host() to create an IPv4Addr with its +// mask set to /32. +func (ipv4 IPv4Addr) DialStreamArgs() (network, dialArgs string) { + if ipv4.Mask != IPv4HostMask || ipv4.Port == 0 { + return "tcp4", "" + } + return "tcp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) +} + +// Equal returns true if a SockAddr is equal to the receiving IPv4Addr. +func (ipv4 IPv4Addr) Equal(sa SockAddr) bool { + ipv4b, ok := sa.(IPv4Addr) + if !ok { + return false + } + + if ipv4.Port != ipv4b.Port { + return false + } + + if ipv4.Address != ipv4b.Address { + return false + } + + if ipv4.NetIPNet().String() != ipv4b.NetIPNet().String() { + return false + } + + return true +} + +// FirstUsable returns an IPv4Addr set to the first address following the +// network prefix. The first usable address in a network is normally the +// gateway and should not be used except by devices forwarding packets +// between two administratively distinct networks (i.e. a router). This +// function does not discriminate against first usable vs "first address that +// should be used." For example, FirstUsable() on "192.168.1.10/24" would +// return the address "192.168.1.1/24". +func (ipv4 IPv4Addr) FirstUsable() IPAddr { + addr := ipv4.NetworkAddress() + + // If /32, return the address itself. If /31 assume a point-to-point + // link and return the lower address. + if ipv4.Maskbits() < 31 { + addr++ + } + + return IPv4Addr{ + Address: IPv4Address(addr), + Mask: IPv4HostMask, + } +} + +// Host returns a copy of ipv4 with its mask set to /32 so that it can be +// used by DialPacketArgs(), DialStreamArgs(), ListenPacketArgs(), or +// ListenStreamArgs(). +func (ipv4 IPv4Addr) Host() IPAddr { + // Nothing should listen on a broadcast address. + return IPv4Addr{ + Address: ipv4.Address, + Mask: IPv4HostMask, + Port: ipv4.Port, + } +} + +// IPPort returns the Port number attached to the IPv4Addr +func (ipv4 IPv4Addr) IPPort() IPPort { + return ipv4.Port +} + +// LastUsable returns the last address before the broadcast address in a +// given network. +func (ipv4 IPv4Addr) LastUsable() IPAddr { + addr := ipv4.BroadcastAddress() + + // If /32, return the address itself. If /31 assume a point-to-point + // link and return the upper address. + if ipv4.Maskbits() < 31 { + addr-- + } + + return IPv4Addr{ + Address: IPv4Address(addr), + Mask: IPv4HostMask, + } +} + +// ListenPacketArgs returns the arguments required to be passed to +// net.ListenUDP(). If the Mask of ipv4 is not a /32, ListenPacketArgs() +// will fail. See Host() to create an IPv4Addr with its mask set to /32. +func (ipv4 IPv4Addr) ListenPacketArgs() (network, listenArgs string) { + if ipv4.Mask != IPv4HostMask { + return "udp4", "" + } + return "udp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) +} + +// ListenStreamArgs returns the arguments required to be passed to +// net.ListenTCP(). If the Mask of ipv4 is not a /32, ListenStreamArgs() +// will fail. See Host() to create an IPv4Addr with its mask set to /32. +func (ipv4 IPv4Addr) ListenStreamArgs() (network, listenArgs string) { + if ipv4.Mask != IPv4HostMask { + return "tcp4", "" + } + return "tcp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) +} + +// Maskbits returns the number of network mask bits in a given IPv4Addr. For +// example, the Maskbits() of "192.168.1.1/24" would return 24. 
+func (ipv4 IPv4Addr) Maskbits() int {
+	mask := make(net.IPMask, IPv4len)
+	binary.BigEndian.PutUint32(mask, uint32(ipv4.Mask))
+	maskOnes, _ := mask.Size()
+	return maskOnes
+}
+
+// MustIPv4Addr is a helper method that must return an IPv4Addr or panic on
+// invalid input.
+func MustIPv4Addr(addr string) IPv4Addr {
+	ipv4, err := NewIPv4Addr(addr)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to create an IPv4Addr from %+q: %v", addr, err))
+	}
+	return ipv4
+}
+
+// NetIP returns the address as a net.IP (address is always presized to
+// IPv4).
+func (ipv4 IPv4Addr) NetIP() *net.IP {
+	x := make(net.IP, IPv4len)
+	binary.BigEndian.PutUint32(x, uint32(ipv4.Address))
+	return &x
+}
+
+// NetIPMask creates a new net.IPMask from the IPv4Addr.
+func (ipv4 IPv4Addr) NetIPMask() *net.IPMask {
+	ipv4Mask := make(net.IPMask, IPv4len)
+	binary.BigEndian.PutUint32(ipv4Mask, uint32(ipv4.Mask))
+	return &ipv4Mask
+}
+
+// NetIPNet creates a new net.IPNet from the IPv4Addr.
+func (ipv4 IPv4Addr) NetIPNet() *net.IPNet {
+	ipv4net := &net.IPNet{}
+	ipv4net.IP = make(net.IP, IPv4len)
+	binary.BigEndian.PutUint32(ipv4net.IP, uint32(ipv4.NetworkAddress()))
+	ipv4net.Mask = *ipv4.NetIPMask()
+	return ipv4net
+}
+
+// Network returns the network prefix or network address for a given network.
+func (ipv4 IPv4Addr) Network() IPAddr {
+	return IPv4Addr{
+		Address: IPv4Address(ipv4.NetworkAddress()),
+		Mask:    ipv4.Mask,
+	}
+}
+
+// NetworkAddress returns an IPv4Network of the IPv4Addr's network address.
+func (ipv4 IPv4Addr) NetworkAddress() IPv4Network {
+	return IPv4Network(uint32(ipv4.Address) & uint32(ipv4.Mask))
+}
+
+// Octets returns a slice of the four octets in an IPv4Addr's Address. The
+// order of the bytes is big endian.
+func (ipv4 IPv4Addr) Octets() []int {
+	return []int{
+		int(ipv4.Address >> 24),
+		int((ipv4.Address >> 16) & 0xff),
+		int((ipv4.Address >> 8) & 0xff),
+		int(ipv4.Address & 0xff),
+	}
+}
+
+// String returns a string representation of the IPv4Addr
+func (ipv4 IPv4Addr) String() string {
+	if ipv4.Port != 0 {
+		return fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port)
+	}
+
+	if ipv4.Maskbits() == 32 {
+		return ipv4.NetIP().String()
+	}
+
+	return fmt.Sprintf("%s/%d", ipv4.NetIP().String(), ipv4.Maskbits())
+}
+
+// Type is used as a type switch and returns TypeIPv4
+func (IPv4Addr) Type() SockAddrType {
+	return TypeIPv4
+}
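As a rough worked example of the accessors above (a sketch outside the patch, assuming the github.com/hashicorp/go-sockaddr import path): "192.168.1.10" is the uint32 0xc0a8010a, so masking with /24 yields the values shown in the comments.

    package main

    import (
    	"fmt"

    	sockaddr "github.com/hashicorp/go-sockaddr"
    )

    func main() {
    	// Illustrative sketch; not part of the vendored source.
    	a := sockaddr.MustIPv4Addr("192.168.1.10/24")

    	fmt.Println(a)                    // 192.168.1.10/24
    	fmt.Println(a.Maskbits())         // 24
    	fmt.Println(a.Network())          // 192.168.1.0/24
    	fmt.Println(a.Broadcast())        // 192.168.1.255
    	fmt.Println(a.Octets())           // [192 168 1 10]
    	fmt.Println(a.AddressHexString()) // c0a8010a
    }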
+
+// IPv4AddrAttr returns a string representation of an attribute for the given
+// IPv4Addr.
+func IPv4AddrAttr(ipv4 IPv4Addr, selector AttrName) string {
+	fn, found := ipv4AddrAttrMap[selector]
+	if !found {
+		return ""
+	}
+
+	return fn(ipv4)
+}
+
+// IPv4Attrs returns a list of attributes supported by the IPv4Addr type
+func IPv4Attrs() []AttrName {
+	return ipv4AddrAttrs
+}
+
+// ipv4AddrInit is called once at init()
+func ipv4AddrInit() {
+	// Sorted for human readability
+	ipv4AddrAttrs = []AttrName{
+		"size", // Same position as in IPv6 for output consistency
+		"broadcast",
+		"uint32",
+	}
+
+	ipv4AddrAttrMap = map[AttrName]func(ipv4 IPv4Addr) string{
+		"broadcast": func(ipv4 IPv4Addr) string {
+			return ipv4.Broadcast().String()
+		},
+		"size": func(ipv4 IPv4Addr) string {
+			return fmt.Sprintf("%d", 1<<uint(IPv4len*8-ipv4.Maskbits()))
+		},
+		"uint32": func(ipv4 IPv4Addr) string {
+			return fmt.Sprintf("%d", uint32(ipv4.Address))
+		},
+	}
+}
+	if len(ipv6Str) > 2 && ipv6Str[0] == '[' && ipv6Str[len(ipv6Str)-1] == ']' {
+		ipv6Str = ipv6Str[1 : len(ipv6Str)-1]
+	}
+	ip := net.ParseIP(ipv6Str)
+	if ip != nil {
+		ipv6 := ip.To16()
+		if ipv6 == nil {
+			return IPv6Addr{}, fmt.Errorf("Unable to string convert %+q to a 16byte IPv6 address", ipv6Str)
+		}
+
+		ipv6BigIntAddr := new(big.Int)
+		ipv6BigIntAddr.SetBytes(ipv6)
+
+		ipv6BigIntMask := new(big.Int)
+		ipv6BigIntMask.Set(ipv6HostMask)
+
+		return IPv6Addr{
+			Address: IPv6Address(ipv6BigIntAddr),
+			Mask:    IPv6Mask(ipv6BigIntMask),
+		}, nil
+	}
+
+	// Parse as an IPv6 CIDR
+	ipAddr, network, err := net.ParseCIDR(ipv6Str)
+	if err == nil {
+		ipv6 := ipAddr.To16()
+		if ipv6 == nil {
+			return IPv6Addr{}, fmt.Errorf("Unable to convert %+q to a 16byte IPv6 address", ipv6Str)
+		}
+
+		ipv6BigIntAddr := new(big.Int)
+		ipv6BigIntAddr.SetBytes(ipv6)
+
+		ipv6BigIntMask := new(big.Int)
+		ipv6BigIntMask.SetBytes(network.Mask)
+
+		ipv6Addr := IPv6Addr{
+			Address: IPv6Address(ipv6BigIntAddr),
+			Mask:    IPv6Mask(ipv6BigIntMask),
+		}
+		return ipv6Addr, nil
+	}
+
+	return IPv6Addr{}, fmt.Errorf("Unable to parse %+q to an IPv6 address: %v", ipv6Str, err)
+}
+
+// AddressBinString returns a string with the IPv6Addr's Address represented
+// as a sequence of '0' and '1' characters. This method is useful for
+// debugging or by operators who want to inspect an address.
+func (ipv6 IPv6Addr) AddressBinString() string {
+	bi := big.Int(*ipv6.Address)
+	return fmt.Sprintf("%0128s", bi.Text(2))
+}
+
+// AddressHexString returns a string with the IPv6Addr address represented as
+// a sequence of hex characters. This method is useful for debugging or by
+// operators who want to inspect an address.
+func (ipv6 IPv6Addr) AddressHexString() string {
+	bi := big.Int(*ipv6.Address)
+	return fmt.Sprintf("%032s", bi.Text(16))
+}
+
+// CmpAddress follows the Cmp() standard protocol and returns:
+//
+// - -1 If the receiver should sort first because its address is lower than arg
+// - 0 if the SockAddr arg is equal to the receiving IPv6Addr or the argument is of a
+//   different type.
+// - 1 If the argument should sort first.
+func (ipv6 IPv6Addr) CmpAddress(sa SockAddr) int {
+	ipv6b, ok := sa.(IPv6Addr)
+	if !ok {
+		return sortDeferDecision
+	}
+
+	ipv6aBigInt := new(big.Int)
+	ipv6aBigInt.Set(ipv6.Address)
+	ipv6bBigInt := new(big.Int)
+	ipv6bBigInt.Set(ipv6b.Address)
+
+	return ipv6aBigInt.Cmp(ipv6bBigInt)
+}
+
+// CmpPort follows the Cmp() standard protocol and returns:
+//
+// - -1 If the receiver should sort first because its port is lower than arg
+// - 0 if the SockAddr arg's port number is equal to the receiving IPv6Addr,
+//   regardless of type.
+// - 1 If the argument should sort first.
+func (ipv6 IPv6Addr) CmpPort(sa SockAddr) int { + var saPort IPPort + switch v := sa.(type) { + case IPv4Addr: + saPort = v.Port + case IPv6Addr: + saPort = v.Port + default: + return sortDeferDecision + } + + switch { + case ipv6.Port == saPort: + return sortDeferDecision + case ipv6.Port < saPort: + return sortReceiverBeforeArg + default: + return sortArgBeforeReceiver + } +} + +// CmpRFC follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because it belongs to the RFC and its +// arg does not +// - 0 if the receiver and arg both belong to the same RFC or neither do. +// - 1 If the arg belongs to the RFC but receiver does not. +func (ipv6 IPv6Addr) CmpRFC(rfcNum uint, sa SockAddr) int { + recvInRFC := IsRFC(rfcNum, ipv6) + ipv6b, ok := sa.(IPv6Addr) + if !ok { + // If the receiver is part of the desired RFC and the SockAddr + // argument is not, sort receiver before the non-IPv6 SockAddr. + // Conversely, if the receiver is not part of the RFC, punt on + // sorting and leave it for the next sorter. + if recvInRFC { + return sortReceiverBeforeArg + } else { + return sortDeferDecision + } + } + + argInRFC := IsRFC(rfcNum, ipv6b) + switch { + case (recvInRFC && argInRFC), (!recvInRFC && !argInRFC): + // If a and b both belong to the RFC, or neither belong to + // rfcNum, defer sorting to the next sorter. + return sortDeferDecision + case recvInRFC && !argInRFC: + return sortReceiverBeforeArg + default: + return sortArgBeforeReceiver + } +} + +// Contains returns true if the SockAddr is contained within the receiver. +func (ipv6 IPv6Addr) Contains(sa SockAddr) bool { + ipv6b, ok := sa.(IPv6Addr) + if !ok { + return false + } + + return ipv6.ContainsNetwork(ipv6b) +} + +// ContainsAddress returns true if the IPv6Address is contained within the +// receiver. +func (ipv6 IPv6Addr) ContainsAddress(x IPv6Address) bool { + xAddr := IPv6Addr{ + Address: x, + Mask: ipv6HostMask, + } + + { + xIPv6 := xAddr.FirstUsable().(IPv6Addr) + yIPv6 := ipv6.FirstUsable().(IPv6Addr) + if xIPv6.CmpAddress(yIPv6) >= 1 { + return false + } + } + + { + xIPv6 := xAddr.LastUsable().(IPv6Addr) + yIPv6 := ipv6.LastUsable().(IPv6Addr) + if xIPv6.CmpAddress(yIPv6) <= -1 { + return false + } + } + return true +} + +// ContainsNetwork returns true if the network from IPv6Addr is contained within +// the receiver. +func (x IPv6Addr) ContainsNetwork(y IPv6Addr) bool { + { + xIPv6 := x.FirstUsable().(IPv6Addr) + yIPv6 := y.FirstUsable().(IPv6Addr) + if ret := xIPv6.CmpAddress(yIPv6); ret >= 1 { + return false + } + } + + { + xIPv6 := x.LastUsable().(IPv6Addr) + yIPv6 := y.LastUsable().(IPv6Addr) + if ret := xIPv6.CmpAddress(yIPv6); ret <= -1 { + return false + } + } + return true +} + +// DialPacketArgs returns the arguments required to be passed to +// net.DialUDP(). If the Mask of ipv6 is not a /128 or the Port is 0, +// DialPacketArgs() will fail. See Host() to create an IPv6Addr with its +// mask set to /128. +func (ipv6 IPv6Addr) DialPacketArgs() (network, dialArgs string) { + ipv6Mask := big.Int(*ipv6.Mask) + if ipv6Mask.Cmp(ipv6HostMask) != 0 || ipv6.Port == 0 { + return "udp6", "" + } + return "udp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port) +} + +// DialStreamArgs returns the arguments required to be passed to +// net.DialTCP(). If the Mask of ipv6 is not a /128 or the Port is 0, +// DialStreamArgs() will fail. See Host() to create an IPv6Addr with its +// mask set to /128. 
+func (ipv6 IPv6Addr) DialStreamArgs() (network, dialArgs string) {
+	ipv6Mask := big.Int(*ipv6.Mask)
+	if ipv6Mask.Cmp(ipv6HostMask) != 0 || ipv6.Port == 0 {
+		return "tcp6", ""
+	}
+	return "tcp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port)
+}
+
+// Equal returns true if a SockAddr is equal to the receiving IPv6Addr.
+func (ipv6a IPv6Addr) Equal(sa SockAddr) bool {
+	ipv6b, ok := sa.(IPv6Addr)
+	if !ok {
+		return false
+	}
+
+	if ipv6a.NetIP().String() != ipv6b.NetIP().String() {
+		return false
+	}
+
+	if ipv6a.NetIPNet().String() != ipv6b.NetIPNet().String() {
+		return false
+	}
+
+	if ipv6a.Port != ipv6b.Port {
+		return false
+	}
+
+	return true
+}
+
+// FirstUsable returns an IPv6Addr set to the first address following the
+// network prefix. The first usable address in a network is normally the
+// gateway and should not be used except by devices forwarding packets
+// between two administratively distinct networks (i.e. a router). This
+// function does not discriminate against first usable vs "first address that
+// should be used." For example, FirstUsable() on "2001:0db8::0003/64" would
+// return "2001:0db8::".
+func (ipv6 IPv6Addr) FirstUsable() IPAddr {
+	return IPv6Addr{
+		Address: IPv6Address(ipv6.NetworkAddress()),
+		Mask:    ipv6HostMask,
+	}
+}
+
+// Host returns a copy of ipv6 with its mask set to /128 so that it can be
+// used by DialPacketArgs(), DialStreamArgs(), ListenPacketArgs(), or
+// ListenStreamArgs().
+func (ipv6 IPv6Addr) Host() IPAddr {
+	// Nothing should listen on a broadcast address.
+	return IPv6Addr{
+		Address: ipv6.Address,
+		Mask:    ipv6HostMask,
+		Port:    ipv6.Port,
+	}
+}
+
+// IPPort returns the Port number attached to the IPv6Addr
+func (ipv6 IPv6Addr) IPPort() IPPort {
+	return ipv6.Port
+}
+
+// LastUsable returns the last address in a given network.
+func (ipv6 IPv6Addr) LastUsable() IPAddr {
+	addr := new(big.Int)
+	addr.Set(ipv6.Address)
+
+	mask := new(big.Int)
+	mask.Set(ipv6.Mask)
+
+	negMask := new(big.Int)
+	negMask.Xor(ipv6HostMask, mask)
+
+	lastAddr := new(big.Int)
+	lastAddr.And(addr, mask)
+	lastAddr.Or(lastAddr, negMask)
+
+	return IPv6Addr{
+		Address: IPv6Address(lastAddr),
+		Mask:    ipv6HostMask,
+	}
+}
+
+// ListenPacketArgs returns the arguments required to be passed to
+// net.ListenUDP(). If the Mask of ipv6 is not a /128, ListenPacketArgs()
+// will fail. See Host() to create an IPv6Addr with its mask set to /128.
+func (ipv6 IPv6Addr) ListenPacketArgs() (network, listenArgs string) {
+	ipv6Mask := big.Int(*ipv6.Mask)
+	if ipv6Mask.Cmp(ipv6HostMask) != 0 {
+		return "udp6", ""
+	}
+	return "udp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port)
+}
+
+// ListenStreamArgs returns the arguments required to be passed to
+// net.ListenTCP(). If the Mask of ipv6 is not a /128, ListenStreamArgs()
+// will fail. See Host() to create an IPv6Addr with its mask set to /128.
+func (ipv6 IPv6Addr) ListenStreamArgs() (network, listenArgs string) {
+	ipv6Mask := big.Int(*ipv6.Mask)
+	if ipv6Mask.Cmp(ipv6HostMask) != 0 {
+		return "tcp6", ""
+	}
+	return "tcp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port)
+}
+
+// Maskbits returns the number of network mask bits in a given IPv6Addr. For
+// example, the Maskbits() of "2001:0db8::0003/64" would return 64.
+func (ipv6 IPv6Addr) Maskbits() int {
+	maskOnes, _ := ipv6.NetIPNet().Mask.Size()
+
+	return maskOnes
+}
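As a rough usage sketch (not part of the vendored file; the import path is assumed, and MustIPv6Addr is defined just below), the /128-versus-CIDR distinction above plays out like this:

    package main

    import (
    	"fmt"

    	sockaddr "github.com/hashicorp/go-sockaddr"
    )

    func main() {
    	// Illustrative sketch; not part of the vendored source.
    	host := sockaddr.MustIPv6Addr("2001:db8::1") // /128 host address
    	network, addr := host.ListenStreamArgs()
    	fmt.Println(network, addr) // tcp6 [2001:db8::1]:0

    	cidr := sockaddr.MustIPv6Addr("2001:db8::/64")
    	fmt.Println(cidr.FirstUsable()) // 2001:db8::
    	fmt.Println(cidr.LastUsable())  // 2001:db8::ffff:ffff:ffff:ffff

    	// A non-/128 mask deliberately yields empty listen arguments.
    	network, addr = cidr.ListenStreamArgs()
    	fmt.Printf("%q %q\n", network, addr) // "tcp6" ""
    }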
+
+// MustIPv6Addr is a helper method that must return an IPv6Addr or panic on
+// invalid input.
+func MustIPv6Addr(addr string) IPv6Addr {
+	ipv6, err := NewIPv6Addr(addr)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to create an IPv6Addr from %+q: %v", addr, err))
+	}
+	return ipv6
+}
+
+// NetIP returns the address as a net.IP.
+func (ipv6 IPv6Addr) NetIP() *net.IP {
+	return bigIntToNetIPv6(ipv6.Address)
+}
+
+// NetIPMask creates a new net.IPMask from the IPv6Addr.
+func (ipv6 IPv6Addr) NetIPMask() *net.IPMask {
+	ipv6Mask := make(net.IPMask, IPv6len)
+	m := big.Int(*ipv6.Mask)
+	copy(ipv6Mask, m.Bytes())
+	return &ipv6Mask
+}
+
+// NetIPNet returns a new net.IPNet built from the IPv6Addr receiver.
+func (ipv6 IPv6Addr) NetIPNet() *net.IPNet {
+	ipv6net := &net.IPNet{}
+	ipv6net.IP = make(net.IP, IPv6len)
+	copy(ipv6net.IP, *ipv6.NetIP())
+	ipv6net.Mask = *ipv6.NetIPMask()
+	return ipv6net
+}
+
+// Network returns the network prefix or network address for a given network.
+func (ipv6 IPv6Addr) Network() IPAddr {
+	return IPv6Addr{
+		Address: IPv6Address(ipv6.NetworkAddress()),
+		Mask:    ipv6.Mask,
+	}
+}
+
+// NetworkAddress returns an IPv6Network of the IPv6Addr's network address.
+func (ipv6 IPv6Addr) NetworkAddress() IPv6Network {
+	addr := new(big.Int)
+	addr.SetBytes((*ipv6.Address).Bytes())
+
+	mask := new(big.Int)
+	mask.SetBytes(*ipv6.NetIPMask())
+
+	netAddr := new(big.Int)
+	netAddr.And(addr, mask)
+
+	return IPv6Network(netAddr)
+}
+
+// Octets returns a slice of the 16 octets in an IPv6Addr's Address. The
+// order of the bytes is big endian.
+func (ipv6 IPv6Addr) Octets() []int {
+	x := make([]int, IPv6len)
+	for i, b := range *bigIntToNetIPv6(ipv6.Address) {
+		x[i] = int(b)
+	}
+
+	return x
+}
+
+// String returns a string representation of the IPv6Addr
+func (ipv6 IPv6Addr) String() string {
+	if ipv6.Port != 0 {
+		return fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port)
+	}
+
+	if ipv6.Maskbits() == 128 {
+		return ipv6.NetIP().String()
+	}
+
+	return fmt.Sprintf("%s/%d", ipv6.NetIP().String(), ipv6.Maskbits())
+}
+
+// Type is used as a type switch and returns TypeIPv6
+func (IPv6Addr) Type() SockAddrType {
+	return TypeIPv6
+}
+
+// IPv6Attrs returns a list of attributes supported by the IPv6Addr type
+func IPv6Attrs() []AttrName {
+	return ipv6AddrAttrs
+}
+
+// IPv6AddrAttr returns a string representation of an attribute for the given
+// IPv6Addr.
+func IPv6AddrAttr(ipv6 IPv6Addr, selector AttrName) string {
+	fn, found := ipv6AddrAttrMap[selector]
+	if !found {
+		return ""
+	}
+
+	return fn(ipv6)
+}
+
+// ipv6AddrInit is called once at init()
+func ipv6AddrInit() {
+	// Sorted for human readability
+	ipv6AddrAttrs = []AttrName{
+		"size", // Same position as in IPv4 for output consistency
+		"uint128",
+	}
+
+	ipv6AddrAttrMap = map[AttrName]func(ipv6 IPv6Addr) string{
+		"size": func(ipv6 IPv6Addr) string {
+			netSize := big.NewInt(1)
+			netSize = netSize.Lsh(netSize, uint(IPv6len*8-ipv6.Maskbits()))
+			return netSize.Text(10)
+		},
+		"uint128": func(ipv6 IPv6Addr) string {
+			b := big.Int(*ipv6.Address)
+			return b.Text(10)
+		},
+	}
+}
+
+// bigIntToNetIPv6 is a helper function that correctly returns a net.IP with the
+// correctly padded values.
+func bigIntToNetIPv6(bi *big.Int) *net.IP {
+	x := make(net.IP, IPv6len)
+	ipv6Bytes := bi.Bytes()
+
+	// It's possible for ipv6Bytes to be less than IPv6len bytes in size. If
+	// they are different sizes we need to pad the size of the response.
+	if len(ipv6Bytes) < IPv6len {
+		buf := new(bytes.Buffer)
+		buf.Grow(IPv6len)
+
+		for i := len(ipv6Bytes); i < IPv6len; i++ {
+			if err := binary.Write(buf, binary.BigEndian, byte(0)); err != nil {
+				panic(fmt.Sprintf("Unable to pad byte %d of input %v: %v", i, bi, err))
+			}
+		}
+
+		for _, b := range ipv6Bytes {
+			if err := binary.Write(buf, binary.BigEndian, b); err != nil {
+				panic(fmt.Sprintf("Unable to preserve endianness of input %v: %v", bi, err))
+			}
+		}
+
+		ipv6Bytes = buf.Bytes()
+	}
+	i := copy(x, ipv6Bytes)
+	if i != IPv6len {
+		panic("IPv6 wrong size")
+	}
+	return &x
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/rfc.go b/vendor/github.com/hashicorp/go-sockaddr/rfc.go
new file mode 100644
index 00000000000..02e188f6fe6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/rfc.go
@@ -0,0 +1,948 @@
+package sockaddr
+
+// ForwardingBlacklist is a faux RFC that includes a list of non-forwardable IP
+// blocks.
+const ForwardingBlacklist = 4294967295
+const ForwardingBlacklistRFC = "4294967295"
+
+// IsRFC tests to see if a SockAddr matches the specified RFC
+func IsRFC(rfcNum uint, sa SockAddr) bool {
+	rfcNetMap := KnownRFCs()
+	rfcNets, ok := rfcNetMap[rfcNum]
+	if !ok {
+		return false
+	}
+
+	var contained bool
+	for _, rfcNet := range rfcNets {
+		if rfcNet.Contains(sa) {
+			contained = true
+			break
+		}
+	}
+	return contained
+}
+
+// KnownRFCs returns an initial set of known RFCs.
+//
+// NOTE (sean@): As this list evolves over time, please submit patches to keep
+// this list current. If something isn't right, inquire, as it may just be a
+// bug on my part. Some of the inclusions were based on my judgement as to what
+// would be a useful value (e.g. RFC3330).
+//
+// Useful resources:
+//
+// * https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml
+// * https://www.iana.org/assignments/ipv6-unicast-address-assignments/ipv6-unicast-address-assignments.xhtml
+func KnownRFCs() map[uint]SockAddrs {
+	// NOTE(sean@): Multiple SockAddrs per RFC lend themselves well to a
+	// RADIX tree, but `ENOTIME`. Patches welcome.
+	return map[uint]SockAddrs{
+		919: {
+			// [RFC919] Broadcasting Internet Datagrams
+			MustIPv4Addr("255.255.255.255/32"), // [RFC1122], §7 Broadcast IP Addressing - Proposed Standards
+		},
+		1122: {
+			// [RFC1122] Requirements for Internet Hosts -- Communication Layers
+			MustIPv4Addr("0.0.0.0/8"),   // [RFC1122], §3.2.1.3
+			MustIPv4Addr("127.0.0.0/8"), // [RFC1122], §3.2.1.3
+		},
+		1112: {
+			// [RFC1112] Host Extensions for IP Multicasting
+			MustIPv4Addr("224.0.0.0/4"), // [RFC1112], §4 Host Group Addresses
+		},
+		1918: {
+			// [RFC1918] Address Allocation for Private Internets
+			MustIPv4Addr("10.0.0.0/8"),
+			MustIPv4Addr("172.16.0.0/12"),
+			MustIPv4Addr("192.168.0.0/16"),
+		},
+		2544: {
+			// [RFC2544] Benchmarking Methodology for Network
+			// Interconnect Devices
+			MustIPv4Addr("198.18.0.0/15"),
+		},
+		2765: {
+			// [RFC2765] Stateless IP/ICMP Translation Algorithm
+			// (SIIT) (obsoleted by RFC 6145, which itself was
+			// later obsoleted by 7915).
+ + // [RFC2765], §2.1 Addresses + MustIPv6Addr("0:0:0:0:0:ffff:0:0/96"), + }, + 2928: { + // [RFC2928] Initial IPv6 Sub-TLA ID Assignments + MustIPv6Addr("2001::/16"), // Superblock + //MustIPv6Addr("2001:0000::/23"), // IANA + //MustIPv6Addr("2001:0200::/23"), // APNIC + //MustIPv6Addr("2001:0400::/23"), // ARIN + //MustIPv6Addr("2001:0600::/23"), // RIPE NCC + //MustIPv6Addr("2001:0800::/23"), // (future assignment) + // ... + //MustIPv6Addr("2001:FE00::/23"), // (future assignment) + }, + 3056: { // 6to4 address + // [RFC3056] Connection of IPv6 Domains via IPv4 Clouds + + // [RFC3056], §2 IPv6 Prefix Allocation + MustIPv6Addr("2002::/16"), + }, + 3068: { + // [RFC3068] An Anycast Prefix for 6to4 Relay Routers + // (obsolete by RFC7526) + + // [RFC3068], § 6to4 Relay anycast address + MustIPv4Addr("192.88.99.0/24"), + + // [RFC3068], §2.5 6to4 IPv6 relay anycast address + // + // NOTE: /120 == 128-(32-24) + MustIPv6Addr("2002:c058:6301::/120"), + }, + 3171: { + // [RFC3171] IANA Guidelines for IPv4 Multicast Address Assignments + MustIPv4Addr("224.0.0.0/4"), + }, + 3330: { + // [RFC3330] Special-Use IPv4 Addresses + + // Addresses in this block refer to source hosts on + // "this" network. Address 0.0.0.0/32 may be used as a + // source address for this host on this network; other + // addresses within 0.0.0.0/8 may be used to refer to + // specified hosts on this network [RFC1700, page 4]. + MustIPv4Addr("0.0.0.0/8"), + + // 10.0.0.0/8 - This block is set aside for use in + // private networks. Its intended use is documented in + // [RFC1918]. Addresses within this block should not + // appear on the public Internet. + MustIPv4Addr("10.0.0.0/8"), + + // 14.0.0.0/8 - This block is set aside for assignments + // to the international system of Public Data Networks + // [RFC1700, page 181]. The registry of assignments + // within this block can be accessed from the "Public + // Data Network Numbers" link on the web page at + // http://www.iana.org/numbers.html. Addresses within + // this block are assigned to users and should be + // treated as such. + + // 24.0.0.0/8 - This block was allocated in early 1996 + // for use in provisioning IP service over cable + // television systems. Although the IANA initially was + // involved in making assignments to cable operators, + // this responsibility was transferred to American + // Registry for Internet Numbers (ARIN) in May 2001. + // Addresses within this block are assigned in the + // normal manner and should be treated as such. + + // 39.0.0.0/8 - This block was used in the "Class A + // Subnet Experiment" that commenced in May 1995, as + // documented in [RFC1797]. The experiment has been + // completed and this block has been returned to the + // pool of addresses reserved for future allocation or + // assignment. This block therefore no longer has a + // special use and is subject to allocation to a + // Regional Internet Registry for assignment in the + // normal manner. + + // 127.0.0.0/8 - This block is assigned for use as the Internet host + // loopback address. A datagram sent by a higher level protocol to an + // address anywhere within this block should loop back inside the host. + // This is ordinarily implemented using only 127.0.0.1/32 for loopback, + // but no addresses within this block should ever appear on any network + // anywhere [RFC1700, page 5]. 
+ MustIPv4Addr("127.0.0.0/8"), + + // 128.0.0.0/16 - This block, corresponding to the + // numerically lowest of the former Class B addresses, + // was initially and is still reserved by the IANA. + // Given the present classless nature of the IP address + // space, the basis for the reservation no longer + // applies and addresses in this block are subject to + // future allocation to a Regional Internet Registry for + // assignment in the normal manner. + + // 169.254.0.0/16 - This is the "link local" block. It + // is allocated for communication between hosts on a + // single link. Hosts obtain these addresses by + // auto-configuration, such as when a DHCP server may + // not be found. + MustIPv4Addr("169.254.0.0/16"), + + // 172.16.0.0/12 - This block is set aside for use in + // private networks. Its intended use is documented in + // [RFC1918]. Addresses within this block should not + // appear on the public Internet. + MustIPv4Addr("172.16.0.0/12"), + + // 191.255.0.0/16 - This block, corresponding to the numerically highest + // to the former Class B addresses, was initially and is still reserved + // by the IANA. Given the present classless nature of the IP address + // space, the basis for the reservation no longer applies and addresses + // in this block are subject to future allocation to a Regional Internet + // Registry for assignment in the normal manner. + + // 192.0.0.0/24 - This block, corresponding to the + // numerically lowest of the former Class C addresses, + // was initially and is still reserved by the IANA. + // Given the present classless nature of the IP address + // space, the basis for the reservation no longer + // applies and addresses in this block are subject to + // future allocation to a Regional Internet Registry for + // assignment in the normal manner. + + // 192.0.2.0/24 - This block is assigned as "TEST-NET" for use in + // documentation and example code. It is often used in conjunction with + // domain names example.com or example.net in vendor and protocol + // documentation. Addresses within this block should not appear on the + // public Internet. + MustIPv4Addr("192.0.2.0/24"), + + // 192.88.99.0/24 - This block is allocated for use as 6to4 relay + // anycast addresses, according to [RFC3068]. + MustIPv4Addr("192.88.99.0/24"), + + // 192.168.0.0/16 - This block is set aside for use in private networks. + // Its intended use is documented in [RFC1918]. Addresses within this + // block should not appear on the public Internet. + MustIPv4Addr("192.168.0.0/16"), + + // 198.18.0.0/15 - This block has been allocated for use + // in benchmark tests of network interconnect devices. + // Its use is documented in [RFC2544]. + MustIPv4Addr("198.18.0.0/15"), + + // 223.255.255.0/24 - This block, corresponding to the + // numerically highest of the former Class C addresses, + // was initially and is still reserved by the IANA. + // Given the present classless nature of the IP address + // space, the basis for the reservation no longer + // applies and addresses in this block are subject to + // future allocation to a Regional Internet Registry for + // assignment in the normal manner. + + // 224.0.0.0/4 - This block, formerly known as the Class + // D address space, is allocated for use in IPv4 + // multicast address assignments. The IANA guidelines + // for assignments from this space are described in + // [RFC3171]. + MustIPv4Addr("224.0.0.0/4"), + + // 240.0.0.0/4 - This block, formerly known as the Class E address + // space, is reserved. 
The "limited broadcast" destination address + // 255.255.255.255 should never be forwarded outside the (sub-)net of + // the source. The remainder of this space is reserved + // for future use. [RFC1700, page 4] + MustIPv4Addr("240.0.0.0/4"), + }, + 3849: { + // [RFC3849] IPv6 Address Prefix Reserved for Documentation + MustIPv6Addr("2001:db8::/32"), // [RFC3849], §4 IANA Considerations + }, + 3927: { + // [RFC3927] Dynamic Configuration of IPv4 Link-Local Addresses + MustIPv4Addr("169.254.0.0/16"), // [RFC3927], §2.1 Link-Local Address Selection + }, + 4038: { + // [RFC4038] Application Aspects of IPv6 Transition + + // [RFC4038], §4.2. IPv6 Applications in a Dual-Stack Node + MustIPv6Addr("0:0:0:0:0:ffff::/96"), + }, + 4193: { + // [RFC4193] Unique Local IPv6 Unicast Addresses + MustIPv6Addr("fc00::/7"), + }, + 4291: { + // [RFC4291] IP Version 6 Addressing Architecture + + // [RFC4291], §2.5.2 The Unspecified Address + MustIPv6Addr("::/128"), + + // [RFC4291], §2.5.3 The Loopback Address + MustIPv6Addr("::1/128"), + + // [RFC4291], §2.5.5.1. IPv4-Compatible IPv6 Address + MustIPv6Addr("::/96"), + + // [RFC4291], §2.5.5.2. IPv4-Mapped IPv6 Address + MustIPv6Addr("::ffff:0:0/96"), + + // [RFC4291], §2.5.6 Link-Local IPv6 Unicast Addresses + MustIPv6Addr("fe80::/10"), + + // [RFC4291], §2.5.7 Site-Local IPv6 Unicast Addresses + // (depreciated) + MustIPv6Addr("fec0::/10"), + + // [RFC4291], §2.7 Multicast Addresses + MustIPv6Addr("ff00::/8"), + + // IPv6 Multicast Information. + // + // In the following "table" below, `ff0x` is replaced + // with the following values depending on the scope of + // the query: + // + // IPv6 Multicast Scopes: + // * ff00/9 // reserved + // * ff01/9 // interface-local + // * ff02/9 // link-local + // * ff03/9 // realm-local + // * ff04/9 // admin-local + // * ff05/9 // site-local + // * ff08/9 // organization-local + // * ff0e/9 // global + // * ff0f/9 // reserved + // + // IPv6 Multicast Addresses: + // * ff0x::2 // All routers + // * ff02::5 // OSPFIGP + // * ff02::6 // OSPFIGP Designated Routers + // * ff02::9 // RIP Routers + // * ff02::a // EIGRP Routers + // * ff02::d // All PIM Routers + // * ff02::1a // All RPL Routers + // * ff0x::fb // mDNSv6 + // * ff0x::101 // All Network Time Protocol (NTP) servers + // * ff02::1:1 // Link Name + // * ff02::1:2 // All-dhcp-agents + // * ff02::1:3 // Link-local Multicast Name Resolution + // * ff05::1:3 // All-dhcp-servers + // * ff02::1:ff00:0/104 // Solicited-node multicast address. 
+ // * ff02::2:ff00:0/104 // Node Information Queries + }, + 4380: { + // [RFC4380] Teredo: Tunneling IPv6 over UDP through + // Network Address Translations (NATs) + + // [RFC4380], §2.6 Global Teredo IPv6 Service Prefix + MustIPv6Addr("2001:0000::/32"), + }, + 4773: { + // [RFC4773] Administration of the IANA Special Purpose IPv6 Address Block + MustIPv6Addr("2001:0000::/23"), // IANA + }, + 4843: { + // [RFC4843] An IPv6 Prefix for Overlay Routable Cryptographic Hash Identifiers (ORCHID) + MustIPv6Addr("2001:10::/28"), // [RFC4843], §7 IANA Considerations + }, + 5180: { + // [RFC5180] IPv6 Benchmarking Methodology for Network Interconnect Devices + MustIPv6Addr("2001:0200::/48"), // [RFC5180], §8 IANA Considerations + }, + 5735: { + // [RFC5735] Special Use IPv4 Addresses + MustIPv4Addr("192.0.2.0/24"), // TEST-NET-1 + MustIPv4Addr("198.51.100.0/24"), // TEST-NET-2 + MustIPv4Addr("203.0.113.0/24"), // TEST-NET-3 + MustIPv4Addr("198.18.0.0/15"), // Benchmarks + }, + 5737: { + // [RFC5737] IPv4 Address Blocks Reserved for Documentation + MustIPv4Addr("192.0.2.0/24"), // TEST-NET-1 + MustIPv4Addr("198.51.100.0/24"), // TEST-NET-2 + MustIPv4Addr("203.0.113.0/24"), // TEST-NET-3 + }, + 6052: { + // [RFC6052] IPv6 Addressing of IPv4/IPv6 Translators + MustIPv6Addr("64:ff9b::/96"), // [RFC6052], §2.1. Well-Known Prefix + }, + 6333: { + // [RFC6333] Dual-Stack Lite Broadband Deployments Following IPv4 Exhaustion + MustIPv4Addr("192.0.0.0/29"), // [RFC6333], §5.7 Well-Known IPv4 Address + }, + 6598: { + // [RFC6598] IANA-Reserved IPv4 Prefix for Shared Address Space + MustIPv4Addr("100.64.0.0/10"), + }, + 6666: { + // [RFC6666] A Discard Prefix for IPv6 + MustIPv6Addr("0100::/64"), + }, + 6890: { + // [RFC6890] Special-Purpose IP Address Registries + + // From "RFC6890 §2.2.1 Information Requirements": + /* + The IPv4 and IPv6 Special-Purpose Address Registries maintain the + following information regarding each entry: + + o Address Block - A block of IPv4 or IPv6 addresses that has been + registered for a special purpose. + + o Name - A descriptive name for the special-purpose address block. + + o RFC - The RFC through which the special-purpose address block was + requested. + + o Allocation Date - The date upon which the special-purpose address + block was allocated. + + o Termination Date - The date upon which the allocation is to be + terminated. This field is applicable for limited-use allocations + only. + + o Source - A boolean value indicating whether an address from the + allocated special-purpose address block is valid when used as the + source address of an IP datagram that transits two devices. + + o Destination - A boolean value indicating whether an address from + the allocated special-purpose address block is valid when used as + the destination address of an IP datagram that transits two + devices. + + o Forwardable - A boolean value indicating whether a router may + forward an IP datagram whose destination address is drawn from the + allocated special-purpose address block between external + interfaces. + + o Global - A boolean value indicating whether an IP datagram whose + destination address is drawn from the allocated special-purpose + address block is forwardable beyond a specified administrative + domain. + + o Reserved-by-Protocol - A boolean value indicating whether the + special-purpose address block is reserved by IP, itself. 
This + value is "TRUE" if the RFC that created the special-purpose + address block requires all compliant IP implementations to behave + in a special way when processing packets either to or from + addresses contained by the address block. + + If the value of "Destination" is FALSE, the values of "Forwardable" + and "Global" must also be false. + */ + + /*+----------------------+----------------------------+ + * | Attribute | Value | + * +----------------------+----------------------------+ + * | Address Block | 0.0.0.0/8 | + * | Name | "This host on this network"| + * | RFC | [RFC1122], Section 3.2.1.3 | + * | Allocation Date | September 1981 | + * | Termination Date | N/A | + * | Source | True | + * | Destination | False | + * | Forwardable | False | + * | Global | False | + * | Reserved-by-Protocol | True | + * +----------------------+----------------------------+*/ + MustIPv4Addr("0.0.0.0/8"), + + /*+----------------------+---------------+ + * | Attribute | Value | + * +----------------------+---------------+ + * | Address Block | 10.0.0.0/8 | + * | Name | Private-Use | + * | RFC | [RFC1918] | + * | Allocation Date | February 1996 | + * | Termination Date | N/A | + * | Source | True | + * | Destination | True | + * | Forwardable | True | + * | Global | False | + * | Reserved-by-Protocol | False | + * +----------------------+---------------+ */ + MustIPv4Addr("10.0.0.0/8"), + + /*+----------------------+----------------------+ + | Attribute | Value | + +----------------------+----------------------+ + | Address Block | 100.64.0.0/10 | + | Name | Shared Address Space | + | RFC | [RFC6598] | + | Allocation Date | April 2012 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------------+*/ + MustIPv4Addr("100.64.0.0/10"), + + /*+----------------------+----------------------------+ + | Attribute | Value | + +----------------------+----------------------------+ + | Address Block | 127.0.0.0/8 | + | Name | Loopback | + | RFC | [RFC1122], Section 3.2.1.3 | + | Allocation Date | September 1981 | + | Termination Date | N/A | + | Source | False [1] | + | Destination | False [1] | + | Forwardable | False [1] | + | Global | False [1] | + | Reserved-by-Protocol | True | + +----------------------+----------------------------+*/ + // [1] Several protocols have been granted exceptions to + // this rule. For examples, see [RFC4379] and + // [RFC5884]. 
+ MustIPv4Addr("127.0.0.0/8"), + + /*+----------------------+----------------+ + | Attribute | Value | + +----------------------+----------------+ + | Address Block | 169.254.0.0/16 | + | Name | Link Local | + | RFC | [RFC3927] | + | Allocation Date | May 2005 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | True | + +----------------------+----------------+*/ + MustIPv4Addr("169.254.0.0/16"), + + /*+----------------------+---------------+ + | Attribute | Value | + +----------------------+---------------+ + | Address Block | 172.16.0.0/12 | + | Name | Private-Use | + | RFC | [RFC1918] | + | Allocation Date | February 1996 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+---------------+*/ + MustIPv4Addr("172.16.0.0/12"), + + /*+----------------------+---------------------------------+ + | Attribute | Value | + +----------------------+---------------------------------+ + | Address Block | 192.0.0.0/24 [2] | + | Name | IETF Protocol Assignments | + | RFC | Section 2.1 of this document | + | Allocation Date | January 2010 | + | Termination Date | N/A | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+---------------------------------+*/ + // [2] Not usable unless by virtue of a more specific + // reservation. + MustIPv4Addr("192.0.0.0/24"), + + /*+----------------------+--------------------------------+ + | Attribute | Value | + +----------------------+--------------------------------+ + | Address Block | 192.0.0.0/29 | + | Name | IPv4 Service Continuity Prefix | + | RFC | [RFC6333], [RFC7335] | + | Allocation Date | June 2011 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+--------------------------------+*/ + MustIPv4Addr("192.0.0.0/29"), + + /*+----------------------+----------------------------+ + | Attribute | Value | + +----------------------+----------------------------+ + | Address Block | 192.0.2.0/24 | + | Name | Documentation (TEST-NET-1) | + | RFC | [RFC5737] | + | Allocation Date | January 2010 | + | Termination Date | N/A | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------------------+*/ + MustIPv4Addr("192.0.2.0/24"), + + /*+----------------------+--------------------+ + | Attribute | Value | + +----------------------+--------------------+ + | Address Block | 192.88.99.0/24 | + | Name | 6to4 Relay Anycast | + | RFC | [RFC3068] | + | Allocation Date | June 2001 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | True | + | Reserved-by-Protocol | False | + +----------------------+--------------------+*/ + MustIPv4Addr("192.88.99.0/24"), + + /*+----------------------+----------------+ + | Attribute | Value | + +----------------------+----------------+ + | Address Block | 192.168.0.0/16 | + | Name | Private-Use | + | RFC | [RFC1918] | + | Allocation Date | February 1996 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + 
+----------------------+----------------+*/ + MustIPv4Addr("192.168.0.0/16"), + + /*+----------------------+---------------+ + | Attribute | Value | + +----------------------+---------------+ + | Address Block | 198.18.0.0/15 | + | Name | Benchmarking | + | RFC | [RFC2544] | + | Allocation Date | March 1999 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+---------------+*/ + MustIPv4Addr("198.18.0.0/15"), + + /*+----------------------+----------------------------+ + | Attribute | Value | + +----------------------+----------------------------+ + | Address Block | 198.51.100.0/24 | + | Name | Documentation (TEST-NET-2) | + | RFC | [RFC5737] | + | Allocation Date | January 2010 | + | Termination Date | N/A | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------------------+*/ + MustIPv4Addr("198.51.100.0/24"), + + /*+----------------------+----------------------------+ + | Attribute | Value | + +----------------------+----------------------------+ + | Address Block | 203.0.113.0/24 | + | Name | Documentation (TEST-NET-3) | + | RFC | [RFC5737] | + | Allocation Date | January 2010 | + | Termination Date | N/A | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------------------+*/ + MustIPv4Addr("203.0.113.0/24"), + + /*+----------------------+----------------------+ + | Attribute | Value | + +----------------------+----------------------+ + | Address Block | 240.0.0.0/4 | + | Name | Reserved | + | RFC | [RFC1112], Section 4 | + | Allocation Date | August 1989 | + | Termination Date | N/A | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | True | + +----------------------+----------------------+*/ + MustIPv4Addr("240.0.0.0/4"), + + /*+----------------------+----------------------+ + | Attribute | Value | + +----------------------+----------------------+ + | Address Block | 255.255.255.255/32 | + | Name | Limited Broadcast | + | RFC | [RFC0919], Section 7 | + | Allocation Date | October 1984 | + | Termination Date | N/A | + | Source | False | + | Destination | True | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------------+*/ + MustIPv4Addr("255.255.255.255/32"), + + /*+----------------------+------------------+ + | Attribute | Value | + +----------------------+------------------+ + | Address Block | ::1/128 | + | Name | Loopback Address | + | RFC | [RFC4291] | + | Allocation Date | February 2006 | + | Termination Date | N/A | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | True | + +----------------------+------------------+*/ + MustIPv6Addr("::1/128"), + + /*+----------------------+---------------------+ + | Attribute | Value | + +----------------------+---------------------+ + | Address Block | ::/128 | + | Name | Unspecified Address | + | RFC | [RFC4291] | + | Allocation Date | February 2006 | + | Termination Date | N/A | + | Source | True | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | True | + +----------------------+---------------------+*/ + 
MustIPv6Addr("::/128"), + + /*+----------------------+---------------------+ + | Attribute | Value | + +----------------------+---------------------+ + | Address Block | 64:ff9b::/96 | + | Name | IPv4-IPv6 Translat. | + | RFC | [RFC6052] | + | Allocation Date | October 2010 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | True | + | Reserved-by-Protocol | False | + +----------------------+---------------------+*/ + MustIPv6Addr("64:ff9b::/96"), + + /*+----------------------+---------------------+ + | Attribute | Value | + +----------------------+---------------------+ + | Address Block | ::ffff:0:0/96 | + | Name | IPv4-mapped Address | + | RFC | [RFC4291] | + | Allocation Date | February 2006 | + | Termination Date | N/A | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | True | + +----------------------+---------------------+*/ + MustIPv6Addr("::ffff:0:0/96"), + + /*+----------------------+----------------------------+ + | Attribute | Value | + +----------------------+----------------------------+ + | Address Block | 100::/64 | + | Name | Discard-Only Address Block | + | RFC | [RFC6666] | + | Allocation Date | June 2012 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------------------+*/ + MustIPv6Addr("100::/64"), + + /*+----------------------+---------------------------+ + | Attribute | Value | + +----------------------+---------------------------+ + | Address Block | 2001::/23 | + | Name | IETF Protocol Assignments | + | RFC | [RFC2928] | + | Allocation Date | September 2000 | + | Termination Date | N/A | + | Source | False[1] | + | Destination | False[1] | + | Forwardable | False[1] | + | Global | False[1] | + | Reserved-by-Protocol | False | + +----------------------+---------------------------+*/ + // [1] Unless allowed by a more specific allocation. + MustIPv6Addr("2001::/16"), + + /*+----------------------+----------------+ + | Attribute | Value | + +----------------------+----------------+ + | Address Block | 2001::/32 | + | Name | TEREDO | + | RFC | [RFC4380] | + | Allocation Date | January 2006 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------+*/ + // Covered by previous entry, included for completeness. + // + // MustIPv6Addr("2001::/16"), + + /*+----------------------+----------------+ + | Attribute | Value | + +----------------------+----------------+ + | Address Block | 2001:2::/48 | + | Name | Benchmarking | + | RFC | [RFC5180] | + | Allocation Date | April 2008 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------+*/ + // Covered by previous entry, included for completeness. 
+			//
+			// MustIPv6Addr("2001:2::/48"),
+
+			/*+----------------------+---------------+
+			  | Attribute            | Value         |
+			  +----------------------+---------------+
+			  | Address Block        | 2001:db8::/32 |
+			  | Name                 | Documentation |
+			  | RFC                  | [RFC3849]     |
+			  | Allocation Date      | July 2004     |
+			  | Termination Date     | N/A           |
+			  | Source               | False         |
+			  | Destination          | False         |
+			  | Forwardable          | False         |
+			  | Global               | False         |
+			  | Reserved-by-Protocol | False         |
+			  +----------------------+---------------+*/
+			// Covered by previous entry, included for completeness.
+			//
+			// MustIPv6Addr("2001:db8::/32"),
+
+			/*+----------------------+--------------+
+			  | Attribute            | Value        |
+			  +----------------------+--------------+
+			  | Address Block        | 2001:10::/28 |
+			  | Name                 | ORCHID       |
+			  | RFC                  | [RFC4843]    |
+			  | Allocation Date      | March 2007   |
+			  | Termination Date     | March 2014   |
+			  | Source               | False        |
+			  | Destination          | False        |
+			  | Forwardable          | False        |
+			  | Global               | False        |
+			  | Reserved-by-Protocol | False        |
+			  +----------------------+--------------+*/
+			// Covered by previous entry, included for completeness.
+			//
+			// MustIPv6Addr("2001:10::/28"),
+
+			/*+----------------------+---------------+
+			  | Attribute            | Value         |
+			  +----------------------+---------------+
+			  | Address Block        | 2002::/16 [2] |
+			  | Name                 | 6to4          |
+			  | RFC                  | [RFC3056]     |
+			  | Allocation Date      | February 2001 |
+			  | Termination Date     | N/A           |
+			  | Source               | True          |
+			  | Destination          | True          |
+			  | Forwardable          | True          |
+			  | Global               | N/A [2]       |
+			  | Reserved-by-Protocol | False         |
+			  +----------------------+---------------+*/
+			// [2] See [RFC3056] for details.
+			MustIPv6Addr("2002::/16"),
+
+			/*+----------------------+--------------+
+			  | Attribute            | Value        |
+			  +----------------------+--------------+
+			  | Address Block        | fc00::/7     |
+			  | Name                 | Unique-Local |
+			  | RFC                  | [RFC4193]    |
+			  | Allocation Date      | October 2005 |
+			  | Termination Date     | N/A          |
+			  | Source               | True         |
+			  | Destination          | True         |
+			  | Forwardable          | True         |
+			  | Global               | False        |
+			  | Reserved-by-Protocol | False        |
+			  +----------------------+--------------+*/
+			MustIPv6Addr("fc00::/7"),
+
+			/*+----------------------+-----------------------+
+			  | Attribute            | Value                 |
+			  +----------------------+-----------------------+
+			  | Address Block        | fe80::/10             |
+			  | Name                 | Linked-Scoped Unicast |
+			  | RFC                  | [RFC4291]             |
+			  | Allocation Date      | February 2006         |
+			  | Termination Date     | N/A                   |
+			  | Source               | True                  |
+			  | Destination          | True                  |
+			  | Forwardable          | False                 |
+			  | Global               | False                 |
+			  | Reserved-by-Protocol | True                  |
+			  +----------------------+-----------------------+*/
+			MustIPv6Addr("fe80::/10"),
+		},
+		7335: {
+			// [RFC7335] IPv4 Service Continuity Prefix
+			MustIPv4Addr("192.0.0.0/29"), // [RFC7335], §6 IANA Considerations
+		},
+		ForwardingBlacklist: { // Pseudo-RFC
+			// Blacklist of non-forwardable IP blocks taken from RFC6890
+			//
+			// TODO: the attributes for forwardable should be
+			// searchable and embedded in the main list of RFCs
+			// above.
+			MustIPv4Addr("0.0.0.0/8"),
+			MustIPv4Addr("127.0.0.0/8"),
+			MustIPv4Addr("169.254.0.0/16"),
+			MustIPv4Addr("192.0.0.0/24"),
+			MustIPv4Addr("192.0.2.0/24"),
+			MustIPv4Addr("198.51.100.0/24"),
+			MustIPv4Addr("203.0.113.0/24"),
+			MustIPv4Addr("240.0.0.0/4"),
+			MustIPv4Addr("255.255.255.255/32"),
+			MustIPv6Addr("::1/128"),
+			MustIPv6Addr("::/128"),
+			MustIPv6Addr("::ffff:0:0/96"),
+
+			// There is no way of expressing a whitelist per RFC2928
+			// atm without creating a negative mask, which I don't
+			// want to do atm.
+ //MustIPv6Addr("2001::/23"), + + MustIPv6Addr("2001:db8::/32"), + MustIPv6Addr("2001:10::/28"), + MustIPv6Addr("fe80::/10"), + }, + } +} + +// VisitAllRFCs iterates over all known RFCs and calls the visitor +func VisitAllRFCs(fn func(rfcNum uint, sockaddrs SockAddrs)) { + rfcNetMap := KnownRFCs() + + // Blacklist of faux-RFCs. Don't show the world that we're abusing the + // RFC system in this library. + rfcBlacklist := map[uint]struct{}{ + ForwardingBlacklist: {}, + } + + for rfcNum, sas := range rfcNetMap { + if _, found := rfcBlacklist[rfcNum]; !found { + fn(rfcNum, sas) + } + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info.go b/vendor/github.com/hashicorp/go-sockaddr/route_info.go new file mode 100644 index 00000000000..2a3ee1db9e8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info.go @@ -0,0 +1,19 @@ +package sockaddr + +// RouteInterface specifies an interface for obtaining memoized route table and +// network information from a given OS. +type RouteInterface interface { + // GetDefaultInterfaceName returns the name of the interface that has a + // default route or an error and an empty string if a problem was + // encountered. + GetDefaultInterfaceName() (string, error) +} + +// VisitCommands visits each command used by the platform-specific RouteInfo +// implementation. +func (ri routeInfo) VisitCommands(fn func(name string, cmd []string)) { + for k, v := range ri.cmds { + cmds := append([]string(nil), v...) + fn(k, cmds) + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_android.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_android.go new file mode 100644 index 00000000000..9885915a6ba --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_android.go @@ -0,0 +1,34 @@ +package sockaddr + +import ( + "errors" + "os/exec" +) + +type routeInfo struct { + cmds map[string][]string +} + +// NewRouteInfo returns a Android-specific implementation of the RouteInfo +// interface. +func NewRouteInfo() (routeInfo, error) { + return routeInfo{ + cmds: map[string][]string{"ip": {"/system/bin/ip", "route", "get", "8.8.8.8"}}, + }, nil +} + +// GetDefaultInterfaceName returns the interface name attached to the default +// route on the default interface. +func (ri routeInfo) GetDefaultInterfaceName() (string, error) { + out, err := exec.Command(ri.cmds["ip"][0], ri.cmds["ip"][1:]...).Output() + if err != nil { + return "", err + } + + + var ifName string + if ifName, err = parseDefaultIfNameFromIPCmdAndroid(string(out)); err != nil { + return "", errors.New("No default interface found") + } + return ifName, nil +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go new file mode 100644 index 00000000000..705757abc7b --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go @@ -0,0 +1,36 @@ +// +build darwin dragonfly freebsd netbsd openbsd + +package sockaddr + +import "os/exec" + +var cmds map[string][]string = map[string][]string{ + "route": {"/sbin/route", "-n", "get", "default"}, +} + +type routeInfo struct { + cmds map[string][]string +} + +// NewRouteInfo returns a BSD-specific implementation of the RouteInfo +// interface. +func NewRouteInfo() (routeInfo, error) { + return routeInfo{ + cmds: cmds, + }, nil +} + +// GetDefaultInterfaceName returns the interface name attached to the default +// route on the default interface. 
+func (ri routeInfo) GetDefaultInterfaceName() (string, error) { + out, err := exec.Command(cmds["route"][0], cmds["route"][1:]...).Output() + if err != nil { + return "", err + } + + var ifName string + if ifName, err = parseDefaultIfNameFromRoute(string(out)); err != nil { + return "", err + } + return ifName, nil +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_default.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_default.go new file mode 100644 index 00000000000..d1b009f6538 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_default.go @@ -0,0 +1,10 @@ +// +build android nacl plan9 + +package sockaddr + +import "errors" + +// getDefaultIfName is the default interface function for unsupported platforms. +func getDefaultIfName() (string, error) { + return "", errors.New("No default interface found (unsupported platform)") +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go new file mode 100644 index 00000000000..b62ce6ecb21 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go @@ -0,0 +1,42 @@ +// +build !android + +package sockaddr + +import ( + "errors" + "os/exec" +) + +type routeInfo struct { + cmds map[string][]string +} + +// NewRouteInfo returns a Linux-specific implementation of the RouteInfo +// interface. +func NewRouteInfo() (routeInfo, error) { + // CoreOS Container Linux moved ip to /usr/bin/ip, so look it up on + // $PATH and fallback to /sbin/ip on error. + path, _ := exec.LookPath("ip") + if path == "" { + path = "/sbin/ip" + } + + return routeInfo{ + cmds: map[string][]string{"ip": {path, "route"}}, + }, nil +} + +// GetDefaultInterfaceName returns the interface name attached to the default +// route on the default interface. +func (ri routeInfo) GetDefaultInterfaceName() (string, error) { + out, err := exec.Command(ri.cmds["ip"][0], ri.cmds["ip"][1:]...).Output() + if err != nil { + return "", err + } + + var ifName string + if ifName, err = parseDefaultIfNameFromIPCmd(string(out)); err != nil { + return "", errors.New("No default interface found") + } + return ifName, nil +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go new file mode 100644 index 00000000000..ee8e7984d79 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go @@ -0,0 +1,37 @@ +package sockaddr + +import ( + "errors" + "os/exec" +) + +var cmds map[string][]string = map[string][]string{ + "route": {"/usr/sbin/route", "-n", "get", "default"}, +} + +type routeInfo struct { + cmds map[string][]string +} + +// NewRouteInfo returns a BSD-specific implementation of the RouteInfo +// interface. +func NewRouteInfo() (routeInfo, error) { + return routeInfo{ + cmds: cmds, + }, nil +} + +// GetDefaultInterfaceName returns the interface name attached to the default +// route on the default interface. 
+func (ri routeInfo) GetDefaultInterfaceName() (string, error) { + out, err := exec.Command(cmds["route"][0], cmds["route"][1:]...).Output() + if err != nil { + return "", err + } + + var ifName string + if ifName, err = parseDefaultIfNameFromRoute(string(out)); err != nil { + return "", errors.New("No default interface found") + } + return ifName, nil +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go new file mode 100644 index 00000000000..3da972883e8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go @@ -0,0 +1,41 @@ +package sockaddr + +import "os/exec" + +var cmds map[string][]string = map[string][]string{ + "netstat": {"netstat", "-rn"}, + "ipconfig": {"ipconfig"}, +} + +type routeInfo struct { + cmds map[string][]string +} + +// NewRouteInfo returns a BSD-specific implementation of the RouteInfo +// interface. +func NewRouteInfo() (routeInfo, error) { + return routeInfo{ + cmds: cmds, + }, nil +} + +// GetDefaultInterfaceName returns the interface name attached to the default +// route on the default interface. +func (ri routeInfo) GetDefaultInterfaceName() (string, error) { + ifNameOut, err := exec.Command(cmds["netstat"][0], cmds["netstat"][1:]...).Output() + if err != nil { + return "", err + } + + ipconfigOut, err := exec.Command(cmds["ipconfig"][0], cmds["ipconfig"][1:]...).Output() + if err != nil { + return "", err + } + + ifName, err := parseDefaultIfNameWindows(string(ifNameOut), string(ipconfigOut)) + if err != nil { + return "", err + } + + return ifName, nil +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/sockaddr.go b/vendor/github.com/hashicorp/go-sockaddr/sockaddr.go new file mode 100644 index 00000000000..826c91c2e3d --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/sockaddr.go @@ -0,0 +1,206 @@ +package sockaddr + +import ( + "encoding/json" + "fmt" + "strings" +) + +type SockAddrType int +type AttrName string + +const ( + TypeUnknown SockAddrType = 0x0 + TypeUnix = 0x1 + TypeIPv4 = 0x2 + TypeIPv6 = 0x4 + + // TypeIP is the union of TypeIPv4 and TypeIPv6 + TypeIP = 0x6 +) + +type SockAddr interface { + // CmpRFC returns 0 if SockAddr exactly matches one of the matched RFC + // networks, -1 if the receiver is contained within the RFC network, or + // 1 if the address is not contained within the RFC. + CmpRFC(rfcNum uint, sa SockAddr) int + + // Contains returns true if the SockAddr arg is contained within the + // receiver + Contains(SockAddr) bool + + // Equal allows for the comparison of two SockAddrs + Equal(SockAddr) bool + + DialPacketArgs() (string, string) + DialStreamArgs() (string, string) + ListenPacketArgs() (string, string) + ListenStreamArgs() (string, string) + + // String returns the string representation of SockAddr + String() string + + // Type returns the SockAddrType + Type() SockAddrType +} + +// sockAddrAttrMap is a map of the SockAddr type-specific attributes. +var sockAddrAttrMap map[AttrName]func(SockAddr) string +var sockAddrAttrs []AttrName + +func init() { + sockAddrInit() +} + +// New creates a new SockAddr from the string. The order in which New() +// attempts to construct a SockAddr is: IPv4Addr, IPv6Addr, SockAddrUnix. +// +// NOTE: New() relies on the heuristic wherein if the path begins with either a +// '.' or '/' character before creating a new UnixSock. 
For UNIX sockets that +// are absolute paths or are nested within a sub-directory, this works as +// expected, however if the UNIX socket is contained in the current working +// directory, this will fail unless the path begins with "./" +// (e.g. "./my-local-socket"). Calls directly to NewUnixSock() do not suffer +// this limitation. Invalid IP addresses such as "256.0.0.0/-1" will run afoul +// of this heuristic and be assumed to be a valid UNIX socket path (which they +// are, but it is probably not what you want and you won't realize it until you +// stat(2) the file system to discover it doesn't exist). +func NewSockAddr(s string) (SockAddr, error) { + ipv4Addr, err := NewIPv4Addr(s) + if err == nil { + return ipv4Addr, nil + } + + ipv6Addr, err := NewIPv6Addr(s) + if err == nil { + return ipv6Addr, nil + } + + // Check to make sure the string begins with either a '.' or '/', or + // contains a '/'. + if len(s) > 1 && (strings.IndexAny(s[0:1], "./") != -1 || strings.IndexByte(s, '/') != -1) { + unixSock, err := NewUnixSock(s) + if err == nil { + return unixSock, nil + } + } + + return nil, fmt.Errorf("Unable to convert %q to an IPv4 or IPv6 address, or a UNIX Socket", s) +} + +// ToIPAddr returns an IPAddr type or nil if the type conversion fails. +func ToIPAddr(sa SockAddr) *IPAddr { + ipa, ok := sa.(IPAddr) + if !ok { + return nil + } + return &ipa +} + +// ToIPv4Addr returns an IPv4Addr type or nil if the type conversion fails. +func ToIPv4Addr(sa SockAddr) *IPv4Addr { + switch v := sa.(type) { + case IPv4Addr: + return &v + default: + return nil + } +} + +// ToIPv6Addr returns an IPv6Addr type or nil if the type conversion fails. +func ToIPv6Addr(sa SockAddr) *IPv6Addr { + switch v := sa.(type) { + case IPv6Addr: + return &v + default: + return nil + } +} + +// ToUnixSock returns a UnixSock type or nil if the type conversion fails. +func ToUnixSock(sa SockAddr) *UnixSock { + switch v := sa.(type) { + case UnixSock: + return &v + default: + return nil + } +} + +// SockAddrAttr returns a string representation of an attribute for the given +// SockAddr. +func SockAddrAttr(sa SockAddr, selector AttrName) string { + fn, found := sockAddrAttrMap[selector] + if !found { + return "" + } + + return fn(sa) +} + +// String() for SockAddrType returns a string representation of the +// SockAddrType (e.g. "IPv4", "IPv6", "UNIX", "IP", or "unknown"). +func (sat SockAddrType) String() string { + switch sat { + case TypeIPv4: + return "IPv4" + case TypeIPv6: + return "IPv6" + // There is no concrete "IP" type. Leaving here as a reminder. + // case TypeIP: + // return "IP" + case TypeUnix: + return "UNIX" + default: + panic("unsupported type") + } +} + +// sockAddrInit is called once at init() +func sockAddrInit() { + sockAddrAttrs = []AttrName{ + "type", // type should be first + "string", + } + + sockAddrAttrMap = map[AttrName]func(sa SockAddr) string{ + "string": func(sa SockAddr) string { + return sa.String() + }, + "type": func(sa SockAddr) string { + return sa.Type().String() + }, + } +} + +// UnixSockAttrs returns a list of attributes supported by the UnixSock type +func SockAddrAttrs() []AttrName { + return sockAddrAttrs +} + +// Although this is pretty trivial to do in a program, having the logic here is +// useful all around. Note that this marshals into a *string* -- the underlying +// string representation of the sockaddr. If you then unmarshal into this type +// in Go, all will work as expected, but externally you can take what comes out +// and use the string value directly. 
+type SockAddrMarshaler struct { + SockAddr +} + +func (s *SockAddrMarshaler) MarshalJSON() ([]byte, error) { + return json.Marshal(s.SockAddr.String()) +} + +func (s *SockAddrMarshaler) UnmarshalJSON(in []byte) error { + var str string + err := json.Unmarshal(in, &str) + if err != nil { + return err + } + sa, err := NewSockAddr(str) + if err != nil { + return err + } + s.SockAddr = sa + return nil +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go b/vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go new file mode 100644 index 00000000000..75fbffb1eab --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go @@ -0,0 +1,193 @@ +package sockaddr + +import ( + "bytes" + "sort" +) + +// SockAddrs is a slice of SockAddrs +type SockAddrs []SockAddr + +func (s SockAddrs) Len() int { return len(s) } +func (s SockAddrs) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// CmpAddrFunc is the function signature that must be met to be used in the +// OrderedAddrBy multiAddrSorter +type CmpAddrFunc func(p1, p2 *SockAddr) int + +// multiAddrSorter implements the Sort interface, sorting the SockAddrs within. +type multiAddrSorter struct { + addrs SockAddrs + cmp []CmpAddrFunc +} + +// Sort sorts the argument slice according to the Cmp functions passed to +// OrderedAddrBy. +func (ms *multiAddrSorter) Sort(sockAddrs SockAddrs) { + ms.addrs = sockAddrs + sort.Sort(ms) +} + +// OrderedAddrBy sorts SockAddr by the list of sort function pointers. +func OrderedAddrBy(cmpFuncs ...CmpAddrFunc) *multiAddrSorter { + return &multiAddrSorter{ + cmp: cmpFuncs, + } +} + +// Len is part of sort.Interface. +func (ms *multiAddrSorter) Len() int { + return len(ms.addrs) +} + +// Less is part of sort.Interface. It is implemented by looping along the +// Cmp() functions until it finds a comparison that is either less than, +// equal to, or greater than. +func (ms *multiAddrSorter) Less(i, j int) bool { + p, q := &ms.addrs[i], &ms.addrs[j] + // Try all but the last comparison. + var k int + for k = 0; k < len(ms.cmp)-1; k++ { + cmp := ms.cmp[k] + x := cmp(p, q) + switch x { + case -1: + // p < q, so we have a decision. + return true + case 1: + // p > q, so we have a decision. + return false + } + // p == q; try the next comparison. + } + // All comparisons to here said "equal", so just return whatever the + // final comparison reports. + switch ms.cmp[k](p, q) { + case -1: + return true + case 1: + return false + default: + // Still a tie! Now what? + return false + } +} + +// Swap is part of sort.Interface. +func (ms *multiAddrSorter) Swap(i, j int) { + ms.addrs[i], ms.addrs[j] = ms.addrs[j], ms.addrs[i] +} + +const ( + // NOTE (sean@): These constants are here for code readability only and + // are sprucing up the code for readability purposes. Some of the + // Cmp*() variants have confusing logic (especially when dealing with + // mixed-type comparisons) and this, I think, has made it easier to grok + // the code faster. + sortReceiverBeforeArg = -1 + sortDeferDecision = 0 + sortArgBeforeReceiver = 1 +) + +// AscAddress is a sorting function to sort SockAddrs by their respective +// address type. Non-equal types are deferred in the sort. 
+func AscAddress(p1Ptr, p2Ptr *SockAddr) int { + p1 := *p1Ptr + p2 := *p2Ptr + + switch v := p1.(type) { + case IPv4Addr: + return v.CmpAddress(p2) + case IPv6Addr: + return v.CmpAddress(p2) + case UnixSock: + return v.CmpAddress(p2) + default: + return sortDeferDecision + } +} + +// AscPort is a sorting function to sort SockAddrs by their respective address +// type. Non-equal types are deferred in the sort. +func AscPort(p1Ptr, p2Ptr *SockAddr) int { + p1 := *p1Ptr + p2 := *p2Ptr + + switch v := p1.(type) { + case IPv4Addr: + return v.CmpPort(p2) + case IPv6Addr: + return v.CmpPort(p2) + default: + return sortDeferDecision + } +} + +// AscPrivate is a sorting function to sort "more secure" private values before +// "more public" values. Both IPv4 and IPv6 are compared against RFC6890 +// (RFC6890 includes, and is not limited to, RFC1918 and RFC6598 for IPv4, and +// IPv6 includes RFC4193). +func AscPrivate(p1Ptr, p2Ptr *SockAddr) int { + p1 := *p1Ptr + p2 := *p2Ptr + + switch v := p1.(type) { + case IPv4Addr, IPv6Addr: + return v.CmpRFC(6890, p2) + default: + return sortDeferDecision + } +} + +// AscNetworkSize is a sorting function to sort SockAddrs based on their network +// size. Non-equal types are deferred in the sort. +func AscNetworkSize(p1Ptr, p2Ptr *SockAddr) int { + p1 := *p1Ptr + p2 := *p2Ptr + p1Type := p1.Type() + p2Type := p2.Type() + + // Network size operations on non-IP types make no sense + if p1Type != p2Type && p1Type != TypeIP { + return sortDeferDecision + } + + ipA := p1.(IPAddr) + ipB := p2.(IPAddr) + + return bytes.Compare([]byte(*ipA.NetIPMask()), []byte(*ipB.NetIPMask())) +} + +// AscType is a sorting function to sort "more secure" types before +// "less-secure" types. +func AscType(p1Ptr, p2Ptr *SockAddr) int { + p1 := *p1Ptr + p2 := *p2Ptr + p1Type := p1.Type() + p2Type := p2.Type() + switch { + case p1Type < p2Type: + return sortReceiverBeforeArg + case p1Type == p2Type: + return sortDeferDecision + case p1Type > p2Type: + return sortArgBeforeReceiver + default: + return sortDeferDecision + } +} + +// FilterByType returns two lists: a list of matched and unmatched SockAddrs +func (sas SockAddrs) FilterByType(type_ SockAddrType) (matched, excluded SockAddrs) { + matched = make(SockAddrs, 0, len(sas)) + excluded = make(SockAddrs, 0, len(sas)) + + for _, sa := range sas { + if sa.Type()&type_ != 0 { + matched = append(matched, sa) + } else { + excluded = append(excluded, sa) + } + } + return matched, excluded +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/unixsock.go b/vendor/github.com/hashicorp/go-sockaddr/unixsock.go new file mode 100644 index 00000000000..f3be3f67e77 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/unixsock.go @@ -0,0 +1,135 @@ +package sockaddr + +import ( + "fmt" + "strings" +) + +type UnixSock struct { + SockAddr + path string +} +type UnixSocks []*UnixSock + +// unixAttrMap is a map of the UnixSockAddr type-specific attributes. +var unixAttrMap map[AttrName]func(UnixSock) string +var unixAttrs []AttrName + +func init() { + unixAttrInit() +} + +// NewUnixSock creates an UnixSock from a string path. String can be in the +// form of either URI-based string (e.g. `file:///etc/passwd`), an absolute +// path (e.g. `/etc/passwd`), or a relative path (e.g. `./foo`). 
+func NewUnixSock(s string) (ret UnixSock, err error) { + ret.path = s + return ret, nil +} + +// CmpAddress follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because its name lexically sorts before arg +// - 0 if the SockAddr arg is not a UnixSock, or is a UnixSock with the same path. +// - 1 If the argument should sort first. +func (us UnixSock) CmpAddress(sa SockAddr) int { + usb, ok := sa.(UnixSock) + if !ok { + return sortDeferDecision + } + + return strings.Compare(us.Path(), usb.Path()) +} + +// DialPacketArgs returns the arguments required to be passed to net.DialUnix() +// with the `unixgram` network type. +func (us UnixSock) DialPacketArgs() (network, dialArgs string) { + return "unixgram", us.path +} + +// DialStreamArgs returns the arguments required to be passed to net.DialUnix() +// with the `unix` network type. +func (us UnixSock) DialStreamArgs() (network, dialArgs string) { + return "unix", us.path +} + +// Equal returns true if a SockAddr is equal to the receiving UnixSock. +func (us UnixSock) Equal(sa SockAddr) bool { + usb, ok := sa.(UnixSock) + if !ok { + return false + } + + if us.Path() != usb.Path() { + return false + } + + return true +} + +// ListenPacketArgs returns the arguments required to be passed to +// net.ListenUnixgram() with the `unixgram` network type. +func (us UnixSock) ListenPacketArgs() (network, dialArgs string) { + return "unixgram", us.path +} + +// ListenStreamArgs returns the arguments required to be passed to +// net.ListenUnix() with the `unix` network type. +func (us UnixSock) ListenStreamArgs() (network, dialArgs string) { + return "unix", us.path +} + +// MustUnixSock is a helper method that must return an UnixSock or panic on +// invalid input. +func MustUnixSock(addr string) UnixSock { + us, err := NewUnixSock(addr) + if err != nil { + panic(fmt.Sprintf("Unable to create a UnixSock from %+q: %v", addr, err)) + } + return us +} + +// Path returns the given path of the UnixSock +func (us UnixSock) Path() string { + return us.path +} + +// String returns the path of the UnixSock +func (us UnixSock) String() string { + return fmt.Sprintf("%+q", us.path) +} + +// Type is used as a type switch and returns TypeUnix +func (UnixSock) Type() SockAddrType { + return TypeUnix +} + +// UnixSockAttrs returns a list of attributes supported by the UnixSockAddr type +func UnixSockAttrs() []AttrName { + return unixAttrs +} + +// UnixSockAttr returns a string representation of an attribute for the given +// UnixSock. +func UnixSockAttr(us UnixSock, attrName AttrName) string { + fn, found := unixAttrMap[attrName] + if !found { + return "" + } + + return fn(us) +} + +// unixAttrInit is called once at init() +func unixAttrInit() { + // Sorted for human readability + unixAttrs = []AttrName{ + "path", + } + + unixAttrMap = map[AttrName]func(us UnixSock) string{ + "path": func(us UnixSock) string { + return us.Path() + }, + } +} diff --git a/vendor/github.com/hashicorp/go-uuid/.travis.yml b/vendor/github.com/hashicorp/go-uuid/.travis.yml new file mode 100644 index 00000000000..769849071ed --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/.travis.yml @@ -0,0 +1,12 @@ +language: go + +sudo: false + +go: + - 1.4 + - 1.5 + - 1.6 + - tip + +script: + - go test -bench . -benchmem -v ./... 
diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE new file mode 100644 index 00000000000..a320b309c44 --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/LICENSE @@ -0,0 +1,365 @@ +Copyright © 2015-2022 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. 
under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. 
Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. 
Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-uuid/README.md b/vendor/github.com/hashicorp/go-uuid/README.md new file mode 100644 index 00000000000..fbde8b9aef6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/README.md @@ -0,0 +1,8 @@ +# uuid [![Build Status](https://travis-ci.org/hashicorp/go-uuid.svg?branch=master)](https://travis-ci.org/hashicorp/go-uuid) + +Generates UUID-format strings using high quality, _purely random_ bytes. It is **not** intended to be RFC compliant, merely to use a well-understood string representation of a 128-bit value. It can also parse UUID-format strings into their component bytes. + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-uuid). diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go new file mode 100644 index 00000000000..0c10c4e9f5f --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/uuid.go @@ -0,0 +1,83 @@ +package uuid + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "io" +) + +// GenerateRandomBytes is used to generate random bytes of given size. 
+func GenerateRandomBytes(size int) ([]byte, error) { + return GenerateRandomBytesWithReader(size, rand.Reader) +} + +// GenerateRandomBytesWithReader is used to generate random bytes of given size read from a given reader. +func GenerateRandomBytesWithReader(size int, reader io.Reader) ([]byte, error) { + if reader == nil { + return nil, fmt.Errorf("provided reader is nil") + } + buf := make([]byte, size) + if _, err := io.ReadFull(reader, buf); err != nil { + return nil, fmt.Errorf("failed to read random bytes: %v", err) + } + return buf, nil +} + + +const uuidLen = 16 + +// GenerateUUID is used to generate a random UUID +func GenerateUUID() (string, error) { + return GenerateUUIDWithReader(rand.Reader) +} + +// GenerateUUIDWithReader is used to generate a random UUID with a given Reader +func GenerateUUIDWithReader(reader io.Reader) (string, error) { + if reader == nil { + return "", fmt.Errorf("provided reader is nil") + } + buf, err := GenerateRandomBytesWithReader(uuidLen, reader) + if err != nil { + return "", err + } + return FormatUUID(buf) +} + +func FormatUUID(buf []byte) (string, error) { + if buflen := len(buf); buflen != uuidLen { + return "", fmt.Errorf("wrong length byte slice (%d)", buflen) + } + + return fmt.Sprintf("%x-%x-%x-%x-%x", + buf[0:4], + buf[4:6], + buf[6:8], + buf[8:10], + buf[10:16]), nil +} + +func ParseUUID(uuid string) ([]byte, error) { + if len(uuid) != 2 * uuidLen + 4 { + return nil, fmt.Errorf("uuid string is wrong length") + } + + if uuid[8] != '-' || + uuid[13] != '-' || + uuid[18] != '-' || + uuid[23] != '-' { + return nil, fmt.Errorf("uuid is improperly formatted") + } + + hexStr := uuid[0:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:36] + + ret, err := hex.DecodeString(hexStr) + if err != nil { + return nil, err + } + if len(ret) != uuidLen { + return nil, fmt.Errorf("decoded hex is the wrong length") + } + + return ret, nil +} diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore new file mode 100644 index 00000000000..15586a2b540 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/.gitignore @@ -0,0 +1,9 @@ +y.output + +# ignore intellij files +.idea +*.iml +*.ipr +*.iws + +*.test diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml new file mode 100644 index 00000000000..cb63a32161b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/.travis.yml @@ -0,0 +1,13 @@ +sudo: false + +language: go + +go: + - 1.x + - tip + +branches: + only: + - master + +script: make test diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE new file mode 100644 index 00000000000..c33dcc7c928 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. 
“Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile
new file mode 100644
index 00000000000..84fd743f5cc
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/Makefile
@@ -0,0 +1,18 @@
+TEST?=./...
+
+default: test
+
+fmt: generate
+ go fmt ./...
+
+test: generate
+ go get -t ./...
+ go test $(TEST) $(TESTARGS)
+
+generate:
+ go generate ./...
+
+updatedeps:
+ go get -u golang.org/x/tools/cmd/stringer
+
+.PHONY: default generate test updatedeps
diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md
new file mode 100644
index 00000000000..c8223326ddc
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/README.md
@@ -0,0 +1,125 @@
+# HCL
+
+[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)
+
+HCL (HashiCorp Configuration Language) is a configuration language built
+by HashiCorp. The goal of HCL is to build a structured configuration language
+that is both human and machine friendly for use with command-line tools, but
+specifically targeted towards DevOps tools, servers, etc.
+
+HCL is also fully JSON compatible. That is, JSON can be used as completely
+valid input to a system expecting HCL. This helps make HCL-based systems
+interoperable with other tools.
+
+HCL is heavily inspired by
+[libucl](https://github.com/vstakhov/libucl),
+nginx configuration, and other similar languages.
+
+## Why?
+
+A common question when first seeing HCL is: why not
+JSON, YAML, etc.?
+
+Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
+used a variety of configuration languages, from full programming languages
+such as Ruby to complete data structure languages such as JSON. What we
+learned is that some people wanted human-friendly configuration languages
+and some people wanted machine-friendly languages.
+
+JSON fits a nice balance in this, but is fairly verbose and, most
+importantly, doesn't support comments. With YAML, we found that beginners
+had a really hard time determining what the actual structure was, and
+ended up guessing more often than not whether to use a hyphen, colon, etc.
+in order to represent some configuration key.
+
+Full programming languages such as Ruby enable complex behavior
+a configuration language shouldn't usually allow, and also force
+people to learn some subset of Ruby.
+
+Because of this, we decided to create our own configuration language
+that is JSON-compatible. Our configuration language (HCL) is designed
+to be written and modified by humans. The API for HCL allows JSON
+as an input so that it is also machine-friendly (machines can generate
+JSON instead of trying to generate HCL).
+
+Our goal with HCL is not to alienate other configuration languages.
+It is instead to provide HCL as a specialized language for our tools,
+and JSON as the interoperability layer.
+
+## Syntax
+
+For a complete grammar, please see the parser itself. A high-level overview
+of the syntax and grammar is listed here.
+
+ * Single line comments start with `#` or `//`
+
+ * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
+ are not allowed. A multi-line comment (also known as a block comment)
+ terminates at the first `*/` found.
+
+ * Values are assigned with the syntax `key = value` (whitespace doesn't
+ matter). The value can be any primitive: a string, number, boolean,
+ object, or list.
+
+ * Strings are double-quoted and can contain any UTF-8 characters.
+ Example: `"Hello, World"`
+
+ * Multi-line strings start with `<<EOF` at the end of a line, and end
+ with `EOF` on its own line ("heredoc syntax"). Any text may be used in
+ place of `EOF`.
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
new file mode 100644
index 00000000000..bed9ebbe141
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -0,0 +1,729 @@
+package hcl
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/parser"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// tagName is the struct tag key used to configure HCL decoding of a field.
+const tagName = "hcl"
+
+var (
+ // nodeType holds a reference to the type of ast.Node
+ nodeType reflect.Type = findNodeType()
+)
+
+// Unmarshal accepts a byte slice as input and writes the
+// data to the value pointed to by v.
+func Unmarshal(bs []byte, v interface{}) error {
+ root, err := parse(bs)
+ if err != nil {
+ return err
+ }
+
+ return DecodeObject(v, root)
+}
+
+// Decode reads the given input and decodes it into the structure
+// given by `out`.
+func Decode(out interface{}, in string) error {
+ obj, err := Parse(in)
+ if err != nil {
+ return err
+ }
+
+ return DecodeObject(out, obj)
+}
+
+// DecodeObject is a lower-level version of Decode. It decodes a
+// raw Object into the given output.
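+//
+// A minimal usage sketch (the input and output variable here are
+// hypothetical, not from this package's tests): parse the source with the
+// hcl/parser package, then decode the resulting *ast.File:
+//
+//	file, err := parser.Parse([]byte(`name = "web"`))
+//	if err != nil {
+//		// handle the parse error
+//	}
+//	var out map[string]interface{}
+//	err = DecodeObject(&out, file)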
+func DecodeObject(out interface{}, n ast.Node) error { + val := reflect.ValueOf(out) + if val.Kind() != reflect.Ptr { + return errors.New("result must be a pointer") + } + + // If we have the file, we really decode the root node + if f, ok := n.(*ast.File); ok { + n = f.Node + } + + var d decoder + return d.decode("root", n, val.Elem()) +} + +type decoder struct { + stack []reflect.Kind +} + +func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error { + k := result + + // If we have an interface with a valid value, we use that + // for the check. + if result.Kind() == reflect.Interface { + elem := result.Elem() + if elem.IsValid() { + k = elem + } + } + + // Push current onto stack unless it is an interface. + if k.Kind() != reflect.Interface { + d.stack = append(d.stack, k.Kind()) + + // Schedule a pop + defer func() { + d.stack = d.stack[:len(d.stack)-1] + }() + } + + switch k.Kind() { + case reflect.Bool: + return d.decodeBool(name, node, result) + case reflect.Float32, reflect.Float64: + return d.decodeFloat(name, node, result) + case reflect.Int, reflect.Int32, reflect.Int64: + return d.decodeInt(name, node, result) + case reflect.Interface: + // When we see an interface, we make our own thing + return d.decodeInterface(name, node, result) + case reflect.Map: + return d.decodeMap(name, node, result) + case reflect.Ptr: + return d.decodePtr(name, node, result) + case reflect.Slice: + return d.decodeSlice(name, node, result) + case reflect.String: + return d.decodeString(name, node, result) + case reflect.Struct: + return d.decodeStruct(name, node, result) + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), + } + } +} + +func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + if n.Token.Type == token.BOOL { + v, err := strconv.ParseBool(n.Token.Text) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(v)) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER { + v, err := strconv.ParseFloat(n.Token.Text, 64) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(v).Convert(result.Type())) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + switch n.Token.Type { + case token.NUMBER: + v, err := strconv.ParseInt(n.Token.Text, 0, 0) + if err != nil { + return err + } + + if result.Kind() == reflect.Interface { + result.Set(reflect.ValueOf(int(v))) + } else { + result.SetInt(v) + } + return nil + case token.STRING: + v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) + if err != nil { + return err + } + + if result.Kind() == reflect.Interface { + result.Set(reflect.ValueOf(int(v))) + } else { + result.SetInt(v) + } + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { + // When we see an ast.Node, we retain 
the value to enable deferred decoding. + // Very useful in situations where we want to preserve ast.Node information + // like Pos + if result.Type() == nodeType && result.CanSet() { + result.Set(reflect.ValueOf(node)) + return nil + } + + var set reflect.Value + redecode := true + + // For testing types, ObjectType should just be treated as a list. We + // set this to a temporary var because we want to pass in the real node. + testNode := node + if ot, ok := node.(*ast.ObjectType); ok { + testNode = ot.List + } + + switch n := testNode.(type) { + case *ast.ObjectList: + // If we're at the root or we're directly within a slice, then we + // decode objects into map[string]interface{}, otherwise we decode + // them into lists. + if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { + var temp map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeMap( + reflect.MapOf( + reflect.TypeOf(""), + tempVal.Type().Elem())) + + set = result + } else { + var temp []map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items)) + set = result + } + case *ast.ObjectType: + // If we're at the root or we're directly within a slice, then we + // decode objects into map[string]interface{}, otherwise we decode + // them into lists. + if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { + var temp map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeMap( + reflect.MapOf( + reflect.TypeOf(""), + tempVal.Type().Elem())) + + set = result + } else { + var temp []map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, 1) + set = result + } + case *ast.ListType: + var temp []interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, 0) + set = result + case *ast.LiteralType: + switch n.Token.Type { + case token.BOOL: + var result bool + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.FLOAT: + var result float64 + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.NUMBER: + var result int + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.STRING, token.HEREDOC: + set = reflect.Indirect(reflect.New(reflect.TypeOf(""))) + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node), + } + } + default: + return fmt.Errorf( + "%s: cannot decode into interface: %T", + name, node) + } + + // Set the result to what its supposed to be, then reset + // result so we don't reflect into this method anymore. + result.Set(set) + + if redecode { + // Revisit the node so that we can use the newly instantiated + // thing and populate it. 
+ if err := d.decode(name, node, result); err != nil { + return err + } + } + + return nil +} + +func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error { + if item, ok := node.(*ast.ObjectItem); ok { + node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} + } + + if ot, ok := node.(*ast.ObjectType); ok { + node = ot.List + } + + n, ok := node.(*ast.ObjectList) + if !ok { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: not an object type for map (%T)", name, node), + } + } + + // If we have an interface, then we can address the interface, + // but not the slice itself, so get the element but set the interface + set := result + if result.Kind() == reflect.Interface { + result = result.Elem() + } + + resultType := result.Type() + resultElemType := resultType.Elem() + resultKeyType := resultType.Key() + if resultKeyType.Kind() != reflect.String { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: map must have string keys", name), + } + } + + // Make a map if it is nil + resultMap := result + if result.IsNil() { + resultMap = reflect.MakeMap( + reflect.MapOf(resultKeyType, resultElemType)) + } + + // Go through each element and decode it. + done := make(map[string]struct{}) + for _, item := range n.Items { + if item.Val == nil { + continue + } + + // github.com/hashicorp/terraform/issue/5740 + if len(item.Keys) == 0 { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: map must have string keys", name), + } + } + + // Get the key we're dealing with, which is the first item + keyStr := item.Keys[0].Token.Value().(string) + + // If we've already processed this key, then ignore it + if _, ok := done[keyStr]; ok { + continue + } + + // Determine the value. If we have more than one key, then we + // get the objectlist of only these keys. + itemVal := item.Val + if len(item.Keys) > 1 { + itemVal = n.Filter(keyStr) + done[keyStr] = struct{}{} + } + + // Make the field name + fieldName := fmt.Sprintf("%s.%s", name, keyStr) + + // Get the key/value as reflection values + key := reflect.ValueOf(keyStr) + val := reflect.Indirect(reflect.New(resultElemType)) + + // If we have a pre-existing value in the map, use that + oldVal := resultMap.MapIndex(key) + if oldVal.IsValid() { + val.Set(oldVal) + } + + // Decode! + if err := d.decode(fieldName, itemVal, val); err != nil { + return err + } + + // Set the value on the map + resultMap.SetMapIndex(key, val) + } + + // Set the final map if we can + set.Set(resultMap) + return nil +} + +func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. 
+ resultType := result.Type() + resultElemType := resultType.Elem() + val := reflect.New(resultElemType) + if err := d.decode(name, node, reflect.Indirect(val)); err != nil { + return err + } + + result.Set(val) + return nil +} + +func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { + // If we have an interface, then we can address the interface, + // but not the slice itself, so get the element but set the interface + set := result + if result.Kind() == reflect.Interface { + result = result.Elem() + } + // Create the slice if it isn't nil + resultType := result.Type() + resultElemType := resultType.Elem() + if result.IsNil() { + resultSliceType := reflect.SliceOf(resultElemType) + result = reflect.MakeSlice( + resultSliceType, 0, 0) + } + + // Figure out the items we'll be copying into the slice + var items []ast.Node + switch n := node.(type) { + case *ast.ObjectList: + items = make([]ast.Node, len(n.Items)) + for i, item := range n.Items { + items[i] = item + } + case *ast.ObjectType: + items = []ast.Node{n} + case *ast.ListType: + items = n.List + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("unknown slice type: %T", node), + } + } + + for i, item := range items { + fieldName := fmt.Sprintf("%s[%d]", name, i) + + // Decode + val := reflect.Indirect(reflect.New(resultElemType)) + + // if item is an object that was decoded from ambiguous JSON and + // flattened, make sure it's expanded if it needs to decode into a + // defined structure. + item := expandObject(item, val) + + if err := d.decode(fieldName, item, val); err != nil { + return err + } + + // Append it onto the slice + result = reflect.Append(result, val) + } + + set.Set(result) + return nil +} + +// expandObject detects if an ambiguous JSON object was flattened to a List which +// should be decoded into a struct, and expands the ast to properly deocode. +func expandObject(node ast.Node, result reflect.Value) ast.Node { + item, ok := node.(*ast.ObjectItem) + if !ok { + return node + } + + elemType := result.Type() + + // our target type must be a struct + switch elemType.Kind() { + case reflect.Ptr: + switch elemType.Elem().Kind() { + case reflect.Struct: + //OK + default: + return node + } + case reflect.Struct: + //OK + default: + return node + } + + // A list value will have a key and field name. If it had more fields, + // it wouldn't have been flattened. 
+ if len(item.Keys) != 2 { + return node + } + + keyToken := item.Keys[0].Token + item.Keys = item.Keys[1:] + + // we need to un-flatten the ast enough to decode + newNode := &ast.ObjectItem{ + Keys: []*ast.ObjectKey{ + &ast.ObjectKey{ + Token: keyToken, + }, + }, + Val: &ast.ObjectType{ + List: &ast.ObjectList{ + Items: []*ast.ObjectItem{item}, + }, + }, + } + + return newNode +} + +func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + switch n.Token.Type { + case token.NUMBER: + result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) + return nil + case token.STRING, token.HEREDOC: + result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type for string %T", name, node), + } +} + +func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { + var item *ast.ObjectItem + if it, ok := node.(*ast.ObjectItem); ok { + item = it + node = it.Val + } + + if ot, ok := node.(*ast.ObjectType); ok { + node = ot.List + } + + // Handle the special case where the object itself is a literal. Previously + // the yacc parser would always ensure top-level elements were arrays. The new + // parser does not make the same guarantees, thus we need to convert any + // top-level literal elements into a list. + if _, ok := node.(*ast.LiteralType); ok && item != nil { + node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} + } + + list, ok := node.(*ast.ObjectList) + if !ok { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), + } + } + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = result + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + type field struct { + field reflect.StructField + val reflect.Value + } + fields := []field{} + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") + + // Ignore fields with tag name "-" + if tagParts[0] == "-" { + continue + } + + if fieldType.Anonymous { + fieldKind := fieldType.Type.Kind() + if fieldKind != reflect.Struct { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unsupported type to struct: %s", + fieldType.Name, fieldKind), + } + } + + // We have an embedded field. We "squash" the fields down + // if specified in the tag. 
+ squash := false + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + structs = append( + structs, result.FieldByName(fieldType.Name)) + continue + } + } + + // Normal struct field, store it away + fields = append(fields, field{fieldType, structVal.Field(i)}) + } + } + + usedKeys := make(map[string]struct{}) + decodedFields := make([]string, 0, len(fields)) + decodedFieldsVal := make([]reflect.Value, 0) + unusedKeysVal := make([]reflect.Value, 0) + for _, f := range fields { + field, fieldValue := f.field, f.val + if !fieldValue.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !fieldValue.CanSet() { + continue + } + + fieldName := field.Name + + tagValue := field.Tag.Get(tagName) + tagParts := strings.SplitN(tagValue, ",", 2) + if len(tagParts) >= 2 { + switch tagParts[1] { + case "decodedFields": + decodedFieldsVal = append(decodedFieldsVal, fieldValue) + continue + case "key": + if item == nil { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: %s asked for 'key', impossible", + name, fieldName), + } + } + + fieldValue.SetString(item.Keys[0].Token.Value().(string)) + continue + case "unusedKeys": + unusedKeysVal = append(unusedKeysVal, fieldValue) + continue + } + } + + if tagParts[0] != "" { + fieldName = tagParts[0] + } + + // Determine the element we'll use to decode. If it is a single + // match (only object with the field), then we decode it exactly. + // If it is a prefix match, then we decode the matches. + filter := list.Filter(fieldName) + + prefixMatches := filter.Children() + matches := filter.Elem() + if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { + continue + } + + // Track the used key + usedKeys[fieldName] = struct{}{} + + // Create the field name and decode. We range over the elements + // because we actually want the value. + fieldName = fmt.Sprintf("%s.%s", name, fieldName) + if len(prefixMatches.Items) > 0 { + if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil { + return err + } + } + for _, match := range matches.Items { + var decodeNode ast.Node = match.Val + if ot, ok := decodeNode.(*ast.ObjectType); ok { + decodeNode = &ast.ObjectList{Items: ot.List.Items} + } + + if err := d.decode(fieldName, decodeNode, fieldValue); err != nil { + return err + } + } + + decodedFields = append(decodedFields, field.Name) + } + + if len(decodedFieldsVal) > 0 { + // Sort it so that it is deterministic + sort.Strings(decodedFields) + + for _, v := range decodedFieldsVal { + v.Set(reflect.ValueOf(decodedFields)) + } + } + + return nil +} + +// findNodeType returns the type of ast.Node +func findNodeType() reflect.Type { + var nodeContainer struct { + Node ast.Node + } + value := reflect.ValueOf(nodeContainer).FieldByName("Node") + return value.Type() +} diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go new file mode 100644 index 00000000000..575a20b50b5 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl.go @@ -0,0 +1,11 @@ +// Package hcl decodes HCL into usable Go structures. +// +// hcl input can come in either pure HCL format or JSON format. +// It can be parsed into an AST, and then decoded into a structure, +// or it can be decoded directly from a string into a structure. 
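+//
+// A minimal decoding sketch (the Config type and input are illustrative,
+// not part of this package):
+//
+//	type Config struct {
+//		Region string `hcl:"region"`
+//	}
+//
+//	var c Config
+//	err := hcl.Decode(&c, `region = "us-east-1"`)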
+// +// If you choose to parse HCL into a raw AST, the benefit is that you +// can write custom visitor implementations to implement custom +// semantic checks. By default, HCL does not perform any semantic +// checks. +package hcl diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go new file mode 100644 index 00000000000..6e5ef654bb8 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go @@ -0,0 +1,219 @@ +// Package ast declares the types used to represent syntax trees for HCL +// (HashiCorp Configuration Language) +package ast + +import ( + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/token" +) + +// Node is an element in the abstract syntax tree. +type Node interface { + node() + Pos() token.Pos +} + +func (File) node() {} +func (ObjectList) node() {} +func (ObjectKey) node() {} +func (ObjectItem) node() {} +func (Comment) node() {} +func (CommentGroup) node() {} +func (ObjectType) node() {} +func (LiteralType) node() {} +func (ListType) node() {} + +// File represents a single HCL file +type File struct { + Node Node // usually a *ObjectList + Comments []*CommentGroup // list of all comments in the source +} + +func (f *File) Pos() token.Pos { + return f.Node.Pos() +} + +// ObjectList represents a list of ObjectItems. An HCL file itself is an +// ObjectList. +type ObjectList struct { + Items []*ObjectItem +} + +func (o *ObjectList) Add(item *ObjectItem) { + o.Items = append(o.Items, item) +} + +// Filter filters out the objects with the given key list as a prefix. +// +// The returned list of objects contain ObjectItems where the keys have +// this prefix already stripped off. This might result in objects with +// zero-length key lists if they have no children. +// +// If no matches are found, an empty ObjectList (non-nil) is returned. +func (o *ObjectList) Filter(keys ...string) *ObjectList { + var result ObjectList + for _, item := range o.Items { + // If there aren't enough keys, then ignore this + if len(item.Keys) < len(keys) { + continue + } + + match := true + for i, key := range item.Keys[:len(keys)] { + key := key.Token.Value().(string) + if key != keys[i] && !strings.EqualFold(key, keys[i]) { + match = false + break + } + } + if !match { + continue + } + + // Strip off the prefix from the children + newItem := *item + newItem.Keys = newItem.Keys[len(keys):] + result.Add(&newItem) + } + + return &result +} + +// Children returns further nested objects (key length > 0) within this +// ObjectList. This should be used with Filter to get at child items. +func (o *ObjectList) Children() *ObjectList { + var result ObjectList + for _, item := range o.Items { + if len(item.Keys) > 0 { + result.Add(item) + } + } + + return &result +} + +// Elem returns items in the list that are direct element assignments +// (key length == 0). This should be used with Filter to get at elements. +func (o *ObjectList) Elem() *ObjectList { + var result ObjectList + for _, item := range o.Items { + if len(item.Keys) == 0 { + result.Add(item) + } + } + + return &result +} + +func (o *ObjectList) Pos() token.Pos { + // always returns the uninitiliazed position + return o.Items[0].Pos() +} + +// ObjectItem represents a HCL Object Item. An item is represented with a key +// (or keys). It can be an assignment or an object (both normal and nested) +type ObjectItem struct { + // keys is only one length long if it's of type assignment. If it's a + // nested object it can be larger than one. 
In that case "assign" is + // invalid as there is no assignments for a nested object. + Keys []*ObjectKey + + // assign contains the position of "=", if any + Assign token.Pos + + // val is the item itself. It can be an object,list, number, bool or a + // string. If key length is larger than one, val can be only of type + // Object. + Val Node + + LeadComment *CommentGroup // associated lead comment + LineComment *CommentGroup // associated line comment +} + +func (o *ObjectItem) Pos() token.Pos { + // I'm not entirely sure what causes this, but removing this causes + // a test failure. We should investigate at some point. + if len(o.Keys) == 0 { + return token.Pos{} + } + + return o.Keys[0].Pos() +} + +// ObjectKeys are either an identifier or of type string. +type ObjectKey struct { + Token token.Token +} + +func (o *ObjectKey) Pos() token.Pos { + return o.Token.Pos +} + +// LiteralType represents a literal of basic type. Valid types are: +// token.NUMBER, token.FLOAT, token.BOOL and token.STRING +type LiteralType struct { + Token token.Token + + // comment types, only used when in a list + LeadComment *CommentGroup + LineComment *CommentGroup +} + +func (l *LiteralType) Pos() token.Pos { + return l.Token.Pos +} + +// ListStatement represents a HCL List type +type ListType struct { + Lbrack token.Pos // position of "[" + Rbrack token.Pos // position of "]" + List []Node // the elements in lexical order +} + +func (l *ListType) Pos() token.Pos { + return l.Lbrack +} + +func (l *ListType) Add(node Node) { + l.List = append(l.List, node) +} + +// ObjectType represents a HCL Object Type +type ObjectType struct { + Lbrace token.Pos // position of "{" + Rbrace token.Pos // position of "}" + List *ObjectList // the nodes in lexical order +} + +func (o *ObjectType) Pos() token.Pos { + return o.Lbrace +} + +// Comment node represents a single //, # style or /*- style commment +type Comment struct { + Start token.Pos // position of / or # + Text string +} + +func (c *Comment) Pos() token.Pos { + return c.Start +} + +// CommentGroup node represents a sequence of comments with no other tokens and +// no empty lines between. +type CommentGroup struct { + List []*Comment // len(List) > 0 +} + +func (c *CommentGroup) Pos() token.Pos { + return c.List[0].Pos() +} + +//------------------------------------------------------------------- +// GoStringer +//------------------------------------------------------------------- + +func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) } +func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) } diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go new file mode 100644 index 00000000000..ba07ad42b02 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go @@ -0,0 +1,52 @@ +package ast + +import "fmt" + +// WalkFunc describes a function to be called for each node during a Walk. The +// returned node can be used to rewrite the AST. Walking stops the returned +// bool is false. +type WalkFunc func(Node) (Node, bool) + +// Walk traverses an AST in depth-first order: It starts by calling fn(node); +// node must not be nil. If fn returns true, Walk invokes fn recursively for +// each of the non-nil children of node, followed by a call of fn(nil). The +// returned node of fn can be used to rewrite the passed node to fn. 
+func Walk(node Node, fn WalkFunc) Node {
+ rewritten, ok := fn(node)
+ if !ok {
+ return rewritten
+ }
+
+ switch n := node.(type) {
+ case *File:
+ n.Node = Walk(n.Node, fn)
+ case *ObjectList:
+ for i, item := range n.Items {
+ n.Items[i] = Walk(item, fn).(*ObjectItem)
+ }
+ case *ObjectKey:
+ // nothing to do
+ case *ObjectItem:
+ for i, k := range n.Keys {
+ n.Keys[i] = Walk(k, fn).(*ObjectKey)
+ }
+
+ if n.Val != nil {
+ n.Val = Walk(n.Val, fn)
+ }
+ case *LiteralType:
+ // nothing to do
+ case *ListType:
+ for i, l := range n.List {
+ n.List[i] = Walk(l, fn)
+ }
+ case *ObjectType:
+ n.List = Walk(n.List, fn).(*ObjectList)
+ default:
+ // should we panic here?
+ fmt.Printf("unknown type: %T\n", n)
+ }
+
+ fn(nil)
+ return rewritten
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
new file mode 100644
index 00000000000..5c99381dfbf
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
@@ -0,0 +1,17 @@
+package parser
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// PosError is a parse error that contains a position.
+type PosError struct {
+ Pos token.Pos
+ Err error
+}
+
+func (e *PosError) Error() string {
+ return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
new file mode 100644
index 00000000000..64c83bcfb55
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
@@ -0,0 +1,532 @@
+// Package parser implements a parser for HCL (HashiCorp Configuration
+// Language)
+package parser
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/scanner"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+type Parser struct {
+ sc *scanner.Scanner
+
+ // Last read token
+ tok token.Token
+ commaPrev token.Token
+
+ comments []*ast.CommentGroup
+ leadComment *ast.CommentGroup // last lead comment
+ lineComment *ast.CommentGroup // last line comment
+
+ enableTrace bool
+ indent int
+ n int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+ return &Parser{
+ sc: scanner.New(src),
+ }
+}
+
+// Parse parses the given source and returns the abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+ // normalize all line endings
+ // since the scanner and output only work with "\n" line endings, we may
+ // end up with dangling "\r" characters in the parsed data.
+ src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
+
+ p := newParser(src)
+ return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse parses the given source and returns the abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+ f := &ast.File{}
+ var err, scerr error
+ p.sc.Error = func(pos token.Pos, msg string) {
+ scerr = &PosError{Pos: pos, Err: errors.New(msg)}
+ }
+
+ f.Node, err = p.objectList(false)
+ if scerr != nil {
+ return nil, scerr
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ f.Comments = p.comments
+ return f, nil
+}
+
+// objectList parses a list of items within an object (generally k/v pairs).
+// The parameter "obj" tells us whether we are within an object (braces:
+// '{', '}') or just at the top level. If we're within an object, we end
+// at an RBRACE.
+func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + if obj { + tok := p.scan() + p.unscan() + if tok.Type == token.RBRACE { + break + } + } + + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + + // object lists can be optionally comma-delimited e.g. when a list of maps + // is being expressed, so a comma is allowed here - it's simply consumed + tok := p.scan() + if tok.Type != token.COMMA { + p.unscan() + } + } + return node, nil +} + +func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { + endline = p.tok.Pos.Line + + // count the endline if it's multiline comment, ie starting with /* + if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { + // don't use range here - no need to decode Unicode code points + for i := 0; i < len(p.tok.Text); i++ { + if p.tok.Text[i] == '\n' { + endline++ + } + } + } + + comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} + p.tok = p.sc.Scan() + return +} + +func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { + var list []*ast.Comment + endline = p.tok.Pos.Line + + for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { + var comment *ast.Comment + comment, endline = p.consumeComment() + list = append(list, comment) + } + + // add comment group to the comments list + comments = &ast.CommentGroup{List: list} + p.comments = append(p.comments, comments) + + return +} + +// objectItem parses a single object item +func (p *Parser) objectItem() (*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if len(keys) > 0 && err == errEofToken { + // We ignore eof token here since it is an error if we didn't + // receive a value (but we did receive a key) for the item. + err = nil + } + if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { + // This is a strange boolean statement, but what it means is: + // We have keys with no value, and we're likely in an object + // (since RBrace ends an object). For this, we set err to nil so + // we continue and get the error below of having the wrong value + // type. + err = nil + + // Reset the token type so we don't think it completed fine. See + // objectType which uses p.tok.Type to check if we're done with + // the object. 
+ p.tok.Type = token.EOF + } + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + if p.leadComment != nil { + o.LeadComment = p.leadComment + p.leadComment = nil + } + + switch p.tok.Type { + case token.ASSIGN: + o.Assign = p.tok.Pos + o.Val, err = p.object() + if err != nil { + return nil, err + } + case token.LBRACE: + o.Val, err = p.objectType() + if err != nil { + return nil, err + } + default: + keyStr := make([]string, 0, len(keys)) + for _, k := range keys { + keyStr = append(keyStr, k.Token.Text) + } + + return nil, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf( + "key '%s' expected start of object ('{') or assignment ('=')", + strings.Join(keyStr, " ")), + } + } + + // key=#comment + // val + if p.lineComment != nil { + o.LineComment, p.lineComment = p.lineComment, nil + } + + // do a look-ahead for line comment + p.scan() + if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { + o.LineComment = p.lineComment + p.lineComment = nil + } + p.unscan() + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + // It is very important to also return the keys here as well as + // the error. This is because we need to be able to tell if we + // did parse keys prior to finding the EOF, or if we just found + // a bare EOF. + return keys, errEofToken + case token.ASSIGN: + // assignment or object only, but not nested objects. this is not + // allowed: `foo bar = {}` + if keyCount > 1 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), + } + } + + if keyCount == 0 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: errors.New("no object keys found!"), + } + } + + return keys, nil + case token.LBRACE: + var err error + + // If we have no keys, then it is a syntax error. i.e. {{}} is not + // allowed. + if len(keys) == 0 { + err = &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), + } + } + + // object + return keys, err + case token.IDENT, token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{Token: p.tok}) + case token.ILLEGAL: + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("illegal character"), + } + default: + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), + } + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. 
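+// For example, the value in `foo = [1, 2]` is parsed as an *ast.ListType,
+// the value in `foo = { bar = 1 }` as an *ast.ObjectType, and the value in
+// `foo = "bar"` as an *ast.LiteralType.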
+func (p *Parser) object() (ast.Node, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.COMMENT: + // implement comment + case token.EOF: + return nil, errEofToken + } + + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("Unknown token: %+v", tok), + } +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{ + Lbrace: p.tok.Pos, + } + + l, err := p.objectList(true) + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. + if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + // No error, scan and expect the ending to be a brace + if tok := p.scan(); tok.Type != token.RBRACE { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type), + } + } + + o.List = l + o.Rbrace = p.tok.Pos // advanced via parseObjectList + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{ + Lbrack: p.tok.Pos, + } + + needComma := false + for { + tok := p.scan() + if needComma { + switch tok.Type { + case token.COMMA, token.RBRACK: + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error parsing list, expected comma or list end, got: %s", + tok.Type), + } + } + } + switch tok.Type { + case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: + node, err := p.literalType() + if err != nil { + return nil, err + } + + // If there is a lead comment, apply it + if p.leadComment != nil { + node.LeadComment = p.leadComment + p.leadComment = nil + } + + l.Add(node) + needComma = true + case token.COMMA: + // get next list item or we are at the end + // do a look-ahead for line comment + p.scan() + if p.lineComment != nil && len(l.List) > 0 { + lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) + if ok { + lit.LineComment = p.lineComment + l.List[len(l.List)-1] = lit + p.lineComment = nil + } + } + p.unscan() + + needComma = false + continue + case token.LBRACE: + // Looks like a nested object, so parse it out + node, err := p.objectType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse object within list: %s", err), + } + } + l.Add(node) + needComma = true + case token.LBRACK: + node, err := p.listType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse list within list: %s", err), + } + } + l.Add(node) + case token.RBRACK: + // finished + l.Rbrack = p.tok.Pos + return l, nil + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), + } + } + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok, + }, nil +} + +// scan returns the next token from the 
underlying scanner. If a token has +// been unscanned then read that instead. In the process, it collects any +// comment groups encountered, and remembers the last lead and line comments. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + // Otherwise read the next token from the scanner and Save it to the buffer + // in case we unscan later. + prev := p.tok + p.tok = p.sc.Scan() + + if p.tok.Type == token.COMMENT { + var comment *ast.CommentGroup + var endline int + + // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", + // p.tok.Pos.Line, prev.Pos.Line, endline) + if p.tok.Pos.Line == prev.Pos.Line { + // The comment is on same line as the previous token; it + // cannot be a lead comment but may be a line comment. + comment, endline = p.consumeCommentGroup(0) + if p.tok.Pos.Line != endline { + // The next token is on a different line, thus + // the last comment group is a line comment. + p.lineComment = comment + } + } + + // consume successor comments, if any + endline = -1 + for p.tok.Type == token.COMMENT { + comment, endline = p.consumeCommentGroup(1) + } + + if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { + switch p.tok.Type { + case token.RBRACE, token.RBRACK: + // Do not count for these cases + default: + // The next token is following on the line immediately after the + // comment group, thus the last comment group is a lead comment. + p.leadComment = comment + } + } + + } + + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go new file mode 100644 index 00000000000..624a18fe3a7 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go @@ -0,0 +1,652 @@ +// Package scanner implements a scanner for HCL (HashiCorp Configuration +// Language) source text. +package scanner + +import ( + "bytes" + "fmt" + "os" + "regexp" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl/hcl/token" +) + +// eof represents a marker rune for the end of the reader. +const eof = rune(0) + +// Scanner defines a lexical scanner +type Scanner struct { + buf *bytes.Buffer // Source buffer for advancing and scanning + src []byte // Source buffer for immutable access + + // Source Position + srcPos token.Pos // current position + prevPos token.Pos // previous position, used for peek() method + + lastCharLen int // length of last character in bytes + lastLineLen int // length of last line in characters (for correct column reporting) + + tokStart int // token text start position + tokEnd int // token text end position + + // Error is called for each error encountered. 
If no Error + // function is set, the error is reported to os.Stderr. + Error func(pos token.Pos, msg string) + + // ErrorCount is incremented by one for each error encountered. + ErrorCount int + + // tokPos is the start position of most recently scanned token; set by + // Scan. The Filename field is always left untouched by the Scanner. If + // an error is reported (via Error) and Position is invalid, the scanner is + // not inside a token. + tokPos token.Pos +} + +// New creates and initializes a new instance of Scanner using src as +// its source content. +func New(src []byte) *Scanner { + // even though we accept a src, we read from a io.Reader compatible type + // (*bytes.Buffer). So in the future we might easily change it to streaming + // read. + b := bytes.NewBuffer(src) + s := &Scanner{ + buf: b, + src: src, + } + + // srcPosition always starts with 1 + s.srcPos.Line = 1 + return s +} + +// next reads the next rune from the bufferred reader. Returns the rune(0) if +// an error occurs (or io.EOF is returned). +func (s *Scanner) next() rune { + ch, size, err := s.buf.ReadRune() + if err != nil { + // advance for error reporting + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + return eof + } + + // remember last position + s.prevPos = s.srcPos + + s.srcPos.Column++ + s.lastCharLen = size + s.srcPos.Offset += size + + if ch == utf8.RuneError && size == 1 { + s.err("illegal UTF-8 encoding") + return ch + } + + if ch == '\n' { + s.srcPos.Line++ + s.lastLineLen = s.srcPos.Column + s.srcPos.Column = 0 + } + + if ch == '\x00' { + s.err("unexpected null character (0x00)") + return eof + } + + if ch == '\uE123' { + s.err("unicode code point U+E123 reserved for internal use") + return utf8.RuneError + } + + // debug + // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) + return ch +} + +// unread unreads the previous read Rune and updates the source position +func (s *Scanner) unread() { + if err := s.buf.UnreadRune(); err != nil { + panic(err) // this is user fault, we should catch it + } + s.srcPos = s.prevPos // put back last position +} + +// peek returns the next rune without advancing the reader. +func (s *Scanner) peek() rune { + peek, _, err := s.buf.ReadRune() + if err != nil { + return eof + } + + s.buf.UnreadRune() + return peek +} + +// Scan scans the next token and returns the token. 
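+//
+// For example (informal), scanning the input `port = 80` produces an IDENT
+// token, an ASSIGN token, a NUMBER token, and finally an EOF token.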
+func (s *Scanner) Scan() token.Token { + ch := s.next() + + // skip white space + for isWhitespace(ch) { + ch = s.next() + } + + var tok token.Type + + // token text markings + s.tokStart = s.srcPos.Offset - s.lastCharLen + + // token position, initial next() is moving the offset by one(size of rune + // actually), though we are interested with the starting point + s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen + if s.srcPos.Column > 0 { + // common case: last character was not a '\n' + s.tokPos.Line = s.srcPos.Line + s.tokPos.Column = s.srcPos.Column + } else { + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + s.tokPos.Line = s.srcPos.Line - 1 + s.tokPos.Column = s.lastLineLen + } + + switch { + case isLetter(ch): + tok = token.IDENT + lit := s.scanIdentifier() + if lit == "true" || lit == "false" { + tok = token.BOOL + } + case isDecimal(ch): + tok = s.scanNumber(ch) + default: + switch ch { + case eof: + tok = token.EOF + case '"': + tok = token.STRING + s.scanString() + case '#', '/': + tok = token.COMMENT + s.scanComment(ch) + case '.': + tok = token.PERIOD + ch = s.peek() + if isDecimal(ch) { + tok = token.FLOAT + ch = s.scanMantissa(ch) + ch = s.scanExponent(ch) + } + case '<': + tok = token.HEREDOC + s.scanHeredoc() + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + tok = token.RBRACE + case ',': + tok = token.COMMA + case '=': + tok = token.ASSIGN + case '+': + tok = token.ADD + case '-': + if isDecimal(s.peek()) { + ch := s.next() + tok = s.scanNumber(ch) + } else { + tok = token.SUB + } + default: + s.err("illegal char") + } + } + + // finish token ending + s.tokEnd = s.srcPos.Offset + + // create token literal + var tokenText string + if s.tokStart >= 0 { + tokenText = string(s.src[s.tokStart:s.tokEnd]) + } + s.tokStart = s.tokEnd // ensure idempotency of tokenText() call + + return token.Token{ + Type: tok, + Pos: s.tokPos, + Text: tokenText, + } +} + +func (s *Scanner) scanComment(ch rune) { + // single line comments + if ch == '#' || (ch == '/' && s.peek() != '*') { + if ch == '/' && s.peek() != '/' { + s.err("expected '/' for comment") + return + } + + ch = s.next() + for ch != '\n' && ch >= 0 && ch != eof { + ch = s.next() + } + if ch != eof && ch >= 0 { + s.unread() + } + return + } + + // be sure we get the character after /* This allows us to find comment's + // that are not erminated + if ch == '/' { + s.next() + ch = s.next() // read character after "/*" + } + + // look for /* - style comments + for { + if ch < 0 || ch == eof { + s.err("comment not terminated") + break + } + + ch0 := ch + ch = s.next() + if ch0 == '*' && ch == '/' { + break + } + } +} + +// scanNumber scans a HCL number definition starting with the given rune +func (s *Scanner) scanNumber(ch rune) token.Type { + if ch == '0' { + // check for hexadecimal, octal or float + ch = s.next() + if ch == 'x' || ch == 'X' { + // hexadecimal + ch = s.next() + found := false + for isHexadecimal(ch) { + ch = s.next() + found = true + } + + if !found { + s.err("illegal hexadecimal number") + } + + if ch != eof { + s.unread() + } + + return token.NUMBER + } + + // now it's either something like: 0421(octal) or 0.1231(float) + illegalOctal := false + for isDecimal(ch) { + ch = s.next() + if ch == '8' || ch == '9' { + // this is just a possibility. For example 0159 is illegal, but + // 0159.23 is valid. So we mark a possible illegal octal. 
If + // the next character is not a period, we'll print the error. + illegalOctal = true + } + } + + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if illegalOctal { + s.err("illegal octal number") + } + + if ch != eof { + s.unread() + } + return token.NUMBER + } + + s.scanMantissa(ch) + ch = s.next() // seek forward + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if ch != eof { + s.unread() + } + return token.NUMBER +} + +// scanMantissa scans the mantissa beginning from the rune. It returns the next +// non decimal rune. It's used to determine wheter it's a fraction or exponent. +func (s *Scanner) scanMantissa(ch rune) rune { + scanned := false + for isDecimal(ch) { + ch = s.next() + scanned = true + } + + if scanned && ch != eof { + s.unread() + } + return ch +} + +// scanFraction scans the fraction after the '.' rune +func (s *Scanner) scanFraction(ch rune) rune { + if ch == '.' { + ch = s.peek() // we peek just to see if we can move forward + ch = s.scanMantissa(ch) + } + return ch +} + +// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' +// rune. +func (s *Scanner) scanExponent(ch rune) rune { + if ch == 'e' || ch == 'E' { + ch = s.next() + if ch == '-' || ch == '+' { + ch = s.next() + } + ch = s.scanMantissa(ch) + } + return ch +} + +// scanHeredoc scans a heredoc string +func (s *Scanner) scanHeredoc() { + // Scan the second '<' in example: '<= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) { + break + } + + // Not an anchor match, record the start of a new line + lineStart = s.srcPos.Offset + } + + if ch == eof { + s.err("heredoc not terminated") + return + } + } + + return +} + +// scanString scans a quoted string +func (s *Scanner) scanString() { + braces := 0 + for { + // '"' opening already consumed + // read character after quote + ch := s.next() + + if (ch == '\n' && braces == 0) || ch < 0 || ch == eof { + s.err("literal not terminated") + return + } + + if ch == '"' && braces == 0 { + break + } + + // If we're going into a ${} then we can ignore quotes for awhile + if braces == 0 && ch == '$' && s.peek() == '{' { + braces++ + s.next() + } else if braces > 0 && ch == '{' { + braces++ + } + if braces > 0 && ch == '}' { + braces-- + } + + if ch == '\\' { + s.scanEscape() + } + } + + return +} + +// scanEscape scans an escape sequence +func (s *Scanner) scanEscape() rune { + // http://en.cppreference.com/w/cpp/language/escape + ch := s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': + // nothing to do + case '0', '1', '2', '3', '4', '5', '6', '7': + // octal notation + ch = s.scanDigits(ch, 8, 3) + case 'x': + // hexademical notation + ch = s.scanDigits(s.next(), 16, 2) + case 'u': + // universal character name + ch = s.scanDigits(s.next(), 16, 4) + case 'U': + // universal character name + ch = s.scanDigits(s.next(), 16, 8) + default: + s.err("illegal char escape") + } + return ch +} + +// scanDigits scans a rune with the given base for n times. 
For example, an
+// octal escape such as \123 is consumed with scanDigits(ch, 8, 3).
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+ start := n
+ for n > 0 && digitVal(ch) < base {
+ ch = s.next()
+ if ch == eof {
+ // If we see an EOF, we halt any more scanning of digits
+ // immediately.
+ break
+ }
+
+ n--
+ }
+ if n > 0 {
+ s.err("illegal char escape")
+ }
+
+ if n != start && ch != eof {
+ // we scanned all digits, put the last non digit char back,
+ // only if we read anything at all
+ s.unread()
+ }
+
+ return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+ offs := s.srcPos.Offset - s.lastCharLen
+ ch := s.next()
+ for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
+ ch = s.next()
+ }
+
+ if ch != eof {
+ s.unread() // we got identifier, put back latest char
+ }
+
+ return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+ pos.Offset = s.srcPos.Offset - s.lastCharLen
+ switch {
+ case s.srcPos.Column > 0:
+ // common case: last character was not a '\n'
+ pos.Line = s.srcPos.Line
+ pos.Column = s.srcPos.Column
+ case s.lastLineLen > 0:
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ pos.Line = s.srcPos.Line - 1
+ pos.Column = s.lastLineLen
+ default:
+ // at the beginning of the source
+ pos.Line = 1
+ pos.Column = 1
+ }
+ return
+}
+
+// err passes any scanning error to the s.Error function. If no Error function
+// is set, the error is printed to os.Stderr by default.
+func (s *Scanner) err(msg string) {
+ s.ErrorCount++
+ pos := s.recentPosition()
+
+ if s.Error != nil {
+ s.Error(pos, msg)
+ return
+ }
+
+ fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+ return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal number
+func isDecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal number
+func isHexadecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+ return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal or hexadecimal rune
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= ch && ch <= 'f':
+ return int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ return int(ch - 'A' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
new file mode 100644
index 00000000000..5f981eaa2f0
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
@@ -0,0 +1,241 @@
+package strconv
+
+import (
+ "errors"
+ "unicode/utf8"
+)
+
+// ErrSyntax
indicates that a value does not have the right syntax for the target type. +var ErrSyntax = errors.New("invalid syntax") + +// Unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. (If s is single-quoted, it would be a Go +// character literal; Unquote returns the corresponding +// one-character string.) +func Unquote(s string) (t string, err error) { + n := len(s) + if n < 2 { + return "", ErrSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", ErrSyntax + } + s = s[1 : n-1] + + if quote != '"' { + return "", ErrSyntax + } + if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { + return "", ErrSyntax + } + + // Is it trivial? Avoid allocation. + if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { + switch quote { + case '"': + return s, nil + case '\'': + r, size := utf8.DecodeRuneInString(s) + if size == len(s) && (r != utf8.RuneError || size != 1) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + // If we're starting a '${}' then let it through un-unquoted. + // Specifically: we don't unquote any characters within the `${}` + // section. + if s[0] == '$' && len(s) > 1 && s[1] == '{' { + buf = append(buf, '$', '{') + s = s[2:] + + // Continue reading until we find the closing brace, copying as-is + braces := 1 + for len(s) > 0 && braces > 0 { + r, size := utf8.DecodeRuneInString(s) + if r == utf8.RuneError { + return "", ErrSyntax + } + + s = s[size:] + + n := utf8.EncodeRune(runeTmp[:], r) + buf = append(buf, runeTmp[:n]...) + + switch r { + case '{': + braces++ + case '}': + braces-- + } + } + if braces != 0 { + return "", ErrSyntax + } + if len(s) == 0 { + // If there's no string left, we're done! + break + } else { + // If there's more left, we need to pop back up to the top of the loop + // in case there's another interpolation in this string. + continue + } + } + + if s[0] == '\n' { + return "", ErrSyntax + } + + c, multibyte, ss, err := unquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + if quote == '\'' && len(s) != 0 { + // single-quoted must be single character + return "", ErrSyntax + } + } + return string(buf), nil +} + +// contains reports whether the string contains the byte c. 
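+// As an illustrative (non-normative) example: contains("a${b}", '$') reports
+// true, while contains("abc", '$') reports false.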
+func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} + +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote && (quote == '\'' || quote == '"'): + err = ErrSyntax + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // hard case: c is backslash + if len(s) <= 1 { + err = ErrSyntax + return + } + c := s[1] + s = s[2:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = ErrSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = ErrSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = ErrSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + v := rune(c) - '0' + if len(s) < 2 { + err = ErrSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = ErrSyntax + return + } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = ErrSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"': + if c != quote { + err = ErrSyntax + return + } + value = rune(c) + default: + err = ErrSyntax + return + } + tail = s + return +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go new file mode 100644 index 00000000000..59c1bb72d4a --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/position.go @@ -0,0 +1,46 @@ +package token + +import "fmt" + +// Pos describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. 
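+// As an illustrative (non-normative) example: for p at Offset 10 on Line 2
+// and u at Offset 3 on Line 1, p.After(u) reports true.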
+func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go new file mode 100644 index 00000000000..e37c0664ecd --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go @@ -0,0 +1,219 @@ +// Package token defines constants representing the lexical tokens for HCL +// (HashiCorp Configuration Language) +package token + +import ( + "fmt" + "strconv" + "strings" + + hclstrconv "github.com/hashicorp/hcl/hcl/strconv" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string + JSON bool +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + COMMENT + + identifier_beg + IDENT // literals + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + HEREDOC // < 0 { + // Pop the current item + n := len(frontier) + item := frontier[n-1] + frontier = frontier[:n-1] + + switch v := item.Val.(type) { + case *ast.ObjectType: + items, frontier = flattenObjectType(v, item, items, frontier) + case *ast.ListType: + items, frontier = flattenListType(v, item, items, frontier) + default: + items = append(items, item) + } + } + + // Reverse the list since the frontier model runs things backwards + for i := len(items)/2 - 1; i >= 0; i-- { + opp := len(items) - 1 - i + items[i], items[opp] = items[opp], items[i] + } + + // Done! Set the original items + list.Items = items + return n, true + }) +} + +func flattenListType( + ot *ast.ListType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list is empty, keep the original list + if len(ot.List) == 0 { + items = append(items, item) + return items, frontier + } + + // All the elements of this object must also be objects! + for _, subitem := range ot.List { + if _, ok := subitem.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! We have a match go through all the items and flatten + for _, elem := range ot.List { + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: item.Keys, + Assign: item.Assign, + Val: elem, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} + +func flattenObjectType( + ot *ast.ObjectType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list has no items we do not have to flatten anything + if ot.List.Items == nil { + items = append(items, item) + return items, frontier + } + + // All the elements of this object must also be objects! + for _, subitem := range ot.List.Items { + if _, ok := subitem.Val.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! 
We have a match go through all the items and flatten + for _, subitem := range ot.List.Items { + // Copy the new key + keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) + copy(keys, item.Keys) + copy(keys[len(item.Keys):], subitem.Keys) + + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: keys, + Assign: item.Assign, + Val: subitem.Val, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go new file mode 100644 index 00000000000..125a5f07298 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go @@ -0,0 +1,313 @@ +package parser + +import ( + "errors" + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + hcltoken "github.com/hashicorp/hcl/hcl/token" + "github.com/hashicorp/hcl/json/scanner" + "github.com/hashicorp/hcl/json/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func Parse(src []byte) (*ast.File, error) { + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = fmt.Errorf("%s: %s", pos, msg) + } + + // The root must be an object in JSON + object, err := p.object() + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + // We make our final node an object list so it is more HCL compatible + f.Node = object.List + + // Flatten it, which finds patterns and turns them into more HCL-like + // AST trees. + flattenObjects(f.Node) + + return f, nil +} + +func (p *Parser) objectList() (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + + // Check for a followup comma. 
If it isn't a comma, then we're done + if tok := p.scan(); tok.Type != token.COMMA { + break + } + } + + return node, nil +} + +// objectItem parses a single object item +func (p *Parser) objectItem() (*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + switch p.tok.Type { + case token.COLON: + pos := p.tok.Pos + o.Assign = hcltoken.Pos{ + Filename: pos.Filename, + Offset: pos.Offset, + Line: pos.Line, + Column: pos.Column, + } + + o.Val, err = p.objectValue() + if err != nil { + return nil, err + } + } + + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + return nil, errEofToken + case token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{ + Token: p.tok.HCLToken(), + }) + case token.COLON: + // If we have a zero keycount it means that we never got + // an object key, i.e. `{ :`. This is a syntax error. + if keyCount == 0 { + return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) + } + + // Done + return keys, nil + case token.ILLEGAL: + return nil, errors.New("illegal") + default: + return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) objectValue() (ast.Node, error) { + defer un(trace(p, "ParseObjectValue")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.EOF: + return nil, errEofToken + } + + return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) object() (*ast.ObjectType, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.LBRACE: + return p.objectType() + case token.EOF: + return nil, errEofToken + } + + return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{} + + l, err := p.objectList() + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. + if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + o.List = l + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{} + + for { + tok := p.scan() + switch tok.Type { + case token.NUMBER, token.FLOAT, token.STRING: + node, err := p.literalType() + if err != nil { + return nil, err + } + + l.Add(node) + case token.COMMA: + continue + case token.LBRACE: + node, err := p.objectType() + if err != nil { + return nil, err + } + + l.Add(node) + case token.BOOL: + // TODO(arslan) should we support? 
not supported by HCL yet + case token.LBRACK: + // TODO(arslan) should we support nested lists? Even though it's + // written in README of HCL, it's not a part of the grammar + // (not defined in parse.y) + case token.RBRACK: + // finished + return l, nil + default: + return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) + } + + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok.HCLToken(), + }, nil +} + +// scan returns the next token from the underlying scanner. If a token has +// been unscanned then read that instead. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + p.tok = p.sc.Scan() + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go new file mode 100644 index 00000000000..fe3f0f09502 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go @@ -0,0 +1,451 @@ +package scanner + +import ( + "bytes" + "fmt" + "os" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl/json/token" +) + +// eof represents a marker rune for the end of the reader. +const eof = rune(0) + +// Scanner defines a lexical scanner +type Scanner struct { + buf *bytes.Buffer // Source buffer for advancing and scanning + src []byte // Source buffer for immutable access + + // Source Position + srcPos token.Pos // current position + prevPos token.Pos // previous position, used for peek() method + + lastCharLen int // length of last character in bytes + lastLineLen int // length of last line in characters (for correct column reporting) + + tokStart int // token text start position + tokEnd int // token text end position + + // Error is called for each error encountered. If no Error + // function is set, the error is reported to os.Stderr. + Error func(pos token.Pos, msg string) + + // ErrorCount is incremented by one for each error encountered. + ErrorCount int + + // tokPos is the start position of most recently scanned token; set by + // Scan. The Filename field is always left untouched by the Scanner. If + // an error is reported (via Error) and Position is invalid, the scanner is + // not inside a token. + tokPos token.Pos +} + +// New creates and initializes a new instance of Scanner using src as +// its source content. +func New(src []byte) *Scanner { + // even though we accept a src, we read from a io.Reader compatible type + // (*bytes.Buffer). So in the future we might easily change it to streaming + // read. 
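+	// For now the *bytes.Buffer gives us ReadRune/UnreadRune, which the
+	// next() and unread() methods below rely on.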
+ b := bytes.NewBuffer(src) + s := &Scanner{ + buf: b, + src: src, + } + + // srcPosition always starts with 1 + s.srcPos.Line = 1 + return s +} + +// next reads the next rune from the bufferred reader. Returns the rune(0) if +// an error occurs (or io.EOF is returned). +func (s *Scanner) next() rune { + ch, size, err := s.buf.ReadRune() + if err != nil { + // advance for error reporting + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + return eof + } + + if ch == utf8.RuneError && size == 1 { + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + s.err("illegal UTF-8 encoding") + return ch + } + + // remember last position + s.prevPos = s.srcPos + + s.srcPos.Column++ + s.lastCharLen = size + s.srcPos.Offset += size + + if ch == '\n' { + s.srcPos.Line++ + s.lastLineLen = s.srcPos.Column + s.srcPos.Column = 0 + } + + // debug + // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) + return ch +} + +// unread unreads the previous read Rune and updates the source position +func (s *Scanner) unread() { + if err := s.buf.UnreadRune(); err != nil { + panic(err) // this is user fault, we should catch it + } + s.srcPos = s.prevPos // put back last position +} + +// peek returns the next rune without advancing the reader. +func (s *Scanner) peek() rune { + peek, _, err := s.buf.ReadRune() + if err != nil { + return eof + } + + s.buf.UnreadRune() + return peek +} + +// Scan scans the next token and returns the token. +func (s *Scanner) Scan() token.Token { + ch := s.next() + + // skip white space + for isWhitespace(ch) { + ch = s.next() + } + + var tok token.Type + + // token text markings + s.tokStart = s.srcPos.Offset - s.lastCharLen + + // token position, initial next() is moving the offset by one(size of rune + // actually), though we are interested with the starting point + s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen + if s.srcPos.Column > 0 { + // common case: last character was not a '\n' + s.tokPos.Line = s.srcPos.Line + s.tokPos.Column = s.srcPos.Column + } else { + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + s.tokPos.Line = s.srcPos.Line - 1 + s.tokPos.Column = s.lastLineLen + } + + switch { + case isLetter(ch): + lit := s.scanIdentifier() + if lit == "true" || lit == "false" { + tok = token.BOOL + } else if lit == "null" { + tok = token.NULL + } else { + s.err("illegal char") + } + case isDecimal(ch): + tok = s.scanNumber(ch) + default: + switch ch { + case eof: + tok = token.EOF + case '"': + tok = token.STRING + s.scanString() + case '.': + tok = token.PERIOD + ch = s.peek() + if isDecimal(ch) { + tok = token.FLOAT + ch = s.scanMantissa(ch) + ch = s.scanExponent(ch) + } + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + tok = token.RBRACE + case ',': + tok = token.COMMA + case ':': + tok = token.COLON + case '-': + if isDecimal(s.peek()) { + ch := s.next() + tok = s.scanNumber(ch) + } else { + s.err("illegal char") + } + default: + s.err("illegal char: " + string(ch)) + } + } + + // finish token ending + s.tokEnd = s.srcPos.Offset + + // create token literal + var tokenText string + if s.tokStart >= 0 { + tokenText = string(s.src[s.tokStart:s.tokEnd]) + } + s.tokStart = s.tokEnd // ensure idempotency of tokenText() call + + return token.Token{ + Type: tok, + Pos: s.tokPos, + Text: tokenText, + } +} + +// scanNumber scans a HCL number definition starting with the given 
rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+	zero := ch == '0'
+	pos := s.srcPos
+
+	s.scanMantissa(ch)
+	ch = s.next() // seek forward
+	if ch == 'e' || ch == 'E' {
+		ch = s.scanExponent(ch)
+		return token.FLOAT
+	}
+
+	if ch == '.' {
+		ch = s.scanFraction(ch)
+		if ch == 'e' || ch == 'E' {
+			ch = s.next()
+			ch = s.scanExponent(ch)
+		}
+		return token.FLOAT
+	}
+
+	if ch != eof {
+		s.unread()
+	}
+
+	// If we have a larger number and this is zero, error
+	if zero && pos != s.srcPos {
+		s.err("numbers cannot start with 0")
+	}
+
+	return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
+// non-decimal rune. It's used to determine whether it's a fraction or exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+	scanned := false
+	for isDecimal(ch) {
+		ch = s.next()
+		scanned = true
+	}
+
+	if scanned && ch != eof {
+		s.unread()
+	}
+	return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+	if ch == '.' {
+		ch = s.peek() // we peek just to see if we can move forward
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+	if ch == 'e' || ch == 'E' {
+		ch = s.next()
+		if ch == '-' || ch == '+' {
+			ch = s.next()
+		}
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+	braces := 0
+	for {
+		// '"' opening already consumed
+		// read character after quote
+		ch := s.next()
+
+		if ch == '\n' || ch < 0 || ch == eof {
+			s.err("literal not terminated")
+			return
+		}
+
+		if ch == '"' {
+			break
+		}
+
+		// If we're going into a ${} then we can ignore quotes for a while
+		if braces == 0 && ch == '$' && s.peek() == '{' {
+			braces++
+			s.next()
+		} else if braces > 0 && ch == '{' {
+			braces++
+		}
+		if braces > 0 && ch == '}' {
+			braces--
+		}
+
+		if ch == '\\' {
+			s.scanEscape()
+		}
+	}
+
+	return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+	// http://en.cppreference.com/w/cpp/language/escape
+	ch := s.next() // read character after '\'
+	switch ch {
+	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+		// nothing to do
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		// octal notation
+		ch = s.scanDigits(ch, 8, 3)
+	case 'x':
+		// hexadecimal notation
+		ch = s.scanDigits(s.next(), 16, 2)
+	case 'u':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 4)
+	case 'U':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 8)
+	default:
+		s.err("illegal char escape")
+	}
+	return ch
+}
+
+// scanDigits scans a rune with the given base for n times. For example, an
+// octal notation \184 results in a call to scanDigits(ch, 8, 3).
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+	for n > 0 && digitVal(ch) < base {
+		ch = s.next()
+		n--
+	}
+	if n > 0 {
+		s.err("illegal char escape")
+	}
+
+	// we scanned all digits, put the last non-digit char back
+	s.unread()
+	return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+	offs := s.srcPos.Offset - s.lastCharLen
+	ch := s.next()
+	for isLetter(ch) || isDigit(ch) || ch == '-' {
+		ch = s.next()
+	}
+
+	if ch != eof {
+		s.unread() // we got the identifier, put back the latest char
+	}
+
+	return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+	pos.Offset = s.srcPos.Offset - s.lastCharLen
+	switch {
+	case s.srcPos.Column > 0:
+		// common case: last character was not a '\n'
+		pos.Line = s.srcPos.Line
+		pos.Column = s.srcPos.Column
+	case s.lastLineLen > 0:
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		pos.Line = s.srcPos.Line - 1
+		pos.Column = s.lastLineLen
+	default:
+		// at the beginning of the source
+		pos.Line = 1
+		pos.Column = 1
+	}
+	return
+}
+
+// err reports any scanning error to the s.Error function. If the function is
+// not defined, it prints the error to os.Stderr by default.
+func (s *Scanner) err(msg string) {
+	s.ErrorCount++
+	pos := s.recentPosition()
+
+	if s.Error != nil {
+		s.Error(pos, msg)
+		return
+	}
+
+	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal digit
+func isDecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal, or hexadecimal rune
+func digitVal(ch rune) int {
+	switch {
+	case '0' <= ch && ch <= '9':
+		return int(ch - '0')
+	case 'a' <= ch && ch <= 'f':
+		return int(ch - 'a' + 10)
+	case 'A' <= ch && ch <= 'F':
+		return int(ch - 'A' + 10)
+	}
+	return 16 // larger than any legal digit val
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go
new file mode 100644
index 00000000000..59c1bb72d4a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
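+//
+// As an illustrative (non-normative) example, the first character of a file
+// is Pos{Offset: 0, Line: 1, Column: 1}.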
+type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. +func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go new file mode 100644 index 00000000000..95a0c3eee65 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/token.go @@ -0,0 +1,118 @@ +package token + +import ( + "fmt" + "strconv" + + hcltoken "github.com/hashicorp/hcl/hcl/token" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + + identifier_beg + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + NULL // null + literal_end + identifier_end + + operator_beg + LBRACK // [ + LBRACE // { + COMMA // , + PERIOD // . + COLON // : + + RBRACK // ] + RBRACE // } + + operator_end +) + +var tokens = [...]string{ + ILLEGAL: "ILLEGAL", + + EOF: "EOF", + + NUMBER: "NUMBER", + FLOAT: "FLOAT", + BOOL: "BOOL", + STRING: "STRING", + NULL: "NULL", + + LBRACK: "LBRACK", + LBRACE: "LBRACE", + COMMA: "COMMA", + PERIOD: "PERIOD", + COLON: "COLON", + + RBRACK: "RBRACK", + RBRACE: "RBRACE", +} + +// String returns the string corresponding to the token tok. +func (t Type) String() string { + s := "" + if 0 <= t && t < Type(len(tokens)) { + s = tokens[t] + } + if s == "" { + s = "token(" + strconv.Itoa(int(t)) + ")" + } + return s +} + +// IsIdentifier returns true for tokens corresponding to identifiers and basic +// type literals; it returns false otherwise. +func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } + +// IsLiteral returns true for tokens corresponding to basic type literals; it +// returns false otherwise. +func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } + +// IsOperator returns true for tokens corresponding to operators and +// delimiters; it returns false otherwise. +func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } + +// String returns the token's literal text. Note that this is only +// applicable for certain token types, such as token.IDENT, +// token.STRING, etc.. +func (t Token) String() string { + return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) +} + +// HCLToken converts this token to an HCL token. +// +// The token type must be a literal type or this will panic. 
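+//
+// As an illustrative (non-normative) example, a JSON STRING token maps to an
+// HCL STRING token with the same text and JSON set to true.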
+func (t Token) HCLToken() hcltoken.Token { + switch t.Type { + case BOOL: + return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} + case FLOAT: + return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} + case NULL: + return hcltoken.Token{Type: hcltoken.STRING, Text: ""} + case NUMBER: + return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} + case STRING: + return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} + default: + panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) + } +} diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go new file mode 100644 index 00000000000..d9993c2928a --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/lex.go @@ -0,0 +1,38 @@ +package hcl + +import ( + "unicode" + "unicode/utf8" +) + +type lexModeValue byte + +const ( + lexModeUnknown lexModeValue = iota + lexModeHcl + lexModeJson +) + +// lexMode returns whether we're going to be parsing in JSON +// mode or HCL mode. +func lexMode(v []byte) lexModeValue { + var ( + r rune + w int + offset int + ) + + for { + r, w = utf8.DecodeRune(v[offset:]) + offset += w + if unicode.IsSpace(r) { + continue + } + if r == '{' { + return lexModeJson + } + break + } + + return lexModeHcl +} diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go new file mode 100644 index 00000000000..1fca53c4cee --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/parse.go @@ -0,0 +1,39 @@ +package hcl + +import ( + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + hclParser "github.com/hashicorp/hcl/hcl/parser" + jsonParser "github.com/hashicorp/hcl/json/parser" +) + +// ParseBytes accepts as input byte slice and returns ast tree. +// +// Input can be either JSON or HCL +func ParseBytes(in []byte) (*ast.File, error) { + return parse(in) +} + +// ParseString accepts input as a string and returns ast tree. +func ParseString(input string) (*ast.File, error) { + return parse([]byte(input)) +} + +func parse(in []byte) (*ast.File, error) { + switch lexMode(in) { + case lexModeHcl: + return hclParser.Parse(in) + case lexModeJson: + return jsonParser.Parse(in) + } + + return nil, fmt.Errorf("unknown config format") +} + +// Parse parses the given input and returns the root object. +// +// The input format can be either HCL or JSON. +func Parse(input string) (*ast.File, error) { + return parse([]byte(input)) +} diff --git a/vendor/github.com/hashicorp/vault/api/LICENSE b/vendor/github.com/hashicorp/vault/api/LICENSE new file mode 100644 index 00000000000..f4f97ee5853 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/LICENSE @@ -0,0 +1,365 @@ +Copyright (c) 2015 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. 
that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/vault/api/README.md b/vendor/github.com/hashicorp/vault/api/README.md new file mode 100644 index 00000000000..7230ce779fe --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/README.md @@ -0,0 +1,9 @@ +Vault API +================= + +This provides the `github.com/hashicorp/vault/api` package which contains code useful for interacting with a Vault server. + +For examples of how to use this module, see the [vault-examples](https://github.com/hashicorp/vault-examples) repo. +For a step-by-step walkthrough on using these client libraries, see the [developer quickstart](https://www.vaultproject.io/docs/get-started/developer-qs). + +[![GoDoc](https://godoc.org/github.com/hashicorp/vault/api?status.png)](https://godoc.org/github.com/hashicorp/vault/api) \ No newline at end of file diff --git a/vendor/github.com/hashicorp/vault/api/auth.go b/vendor/github.com/hashicorp/vault/api/auth.go new file mode 100644 index 00000000000..fa92de4b3fd --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/auth.go @@ -0,0 +1,112 @@ +package api + +import ( + "context" + "fmt" +) + +// Auth is used to perform credential backend related operations. +type Auth struct { + c *Client +} + +type AuthMethod interface { + Login(ctx context.Context, client *Client) (*Secret, error) +} + +// Auth is used to return the client for credential-backend API calls. +func (c *Client) Auth() *Auth { + return &Auth{c: c} +} + +// Login sets up the required request body for login requests to the given auth +// method's /login API endpoint, and then performs a write to it. After a +// successful login, this method will automatically set the client's token to +// the login response's ClientToken as well. +// +// The Secret returned is the authentication secret, which if desired can be +// passed as input to the NewLifetimeWatcher method in order to start +// automatically renewing the token. +func (a *Auth) Login(ctx context.Context, authMethod AuthMethod) (*Secret, error) { + if authMethod == nil { + return nil, fmt.Errorf("no auth method provided for login") + } + return a.login(ctx, authMethod) +} + +// MFALogin is a wrapper that helps satisfy Vault's MFA implementation. +// If optional credentials are provided a single-phase login will be attempted +// and the resulting Secret will contain a ClientToken if the authentication is successful. +// The client's token will also be set accordingly. 
+// +// If no credentials are provided a two-phase MFA login will be assumed and the resulting +// Secret will have a MFARequirement containing the MFARequestID to be used in a follow-up +// call to `sys/mfa/validate` or by passing it to the method (*Auth).MFAValidate. +func (a *Auth) MFALogin(ctx context.Context, authMethod AuthMethod, creds ...string) (*Secret, error) { + if len(creds) > 0 { + a.c.SetMFACreds(creds) + return a.login(ctx, authMethod) + } + + return a.twoPhaseMFALogin(ctx, authMethod) +} + +// MFAValidate validates an MFA request using the appropriate payload and a secret containing +// Auth.MFARequirement, like the one returned by MFALogin when credentials are not provided. +// Upon successful validation the client token will be set accordingly. +// +// The Secret returned is the authentication secret, which if desired can be +// passed as input to the NewLifetimeWatcher method in order to start +// automatically renewing the token. +func (a *Auth) MFAValidate(ctx context.Context, mfaSecret *Secret, payload map[string]interface{}) (*Secret, error) { + if mfaSecret == nil || mfaSecret.Auth == nil || mfaSecret.Auth.MFARequirement == nil { + return nil, fmt.Errorf("secret does not contain MFARequirements") + } + + s, err := a.c.Sys().MFAValidateWithContext(ctx, mfaSecret.Auth.MFARequirement.GetMFARequestID(), payload) + if err != nil { + return nil, err + } + + return a.checkAndSetToken(s) +} + +// login performs the (*AuthMethod).Login() with the configured client and checks that a ClientToken is returned +func (a *Auth) login(ctx context.Context, authMethod AuthMethod) (*Secret, error) { + s, err := authMethod.Login(ctx, a.c) + if err != nil { + return nil, fmt.Errorf("unable to log in to auth method: %w", err) + } + + return a.checkAndSetToken(s) +} + +// twoPhaseMFALogin performs the (*AuthMethod).Login() with the configured client +// and checks that an MFARequirement is returned +func (a *Auth) twoPhaseMFALogin(ctx context.Context, authMethod AuthMethod) (*Secret, error) { + s, err := authMethod.Login(ctx, a.c) + if err != nil { + return nil, fmt.Errorf("unable to log in: %w", err) + } + if s == nil || s.Auth == nil || s.Auth.MFARequirement == nil { + if s != nil { + s.Warnings = append(s.Warnings, "expected secret to contain MFARequirements") + } + return s, fmt.Errorf("assumed two-phase MFA login, returned secret is missing MFARequirements") + } + + return s, nil +} + +func (a *Auth) checkAndSetToken(s *Secret) (*Secret, error) { + if s == nil || s.Auth == nil || s.Auth.ClientToken == "" { + if s != nil { + s.Warnings = append(s.Warnings, "expected secret to contain ClientToken") + } + return s, fmt.Errorf("response did not return ClientToken, client token not set") + } + + a.c.SetToken(s.Auth.ClientToken) + + return s, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/auth_token.go b/vendor/github.com/hashicorp/vault/api/auth_token.go new file mode 100644 index 00000000000..52be1e7852b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/auth_token.go @@ -0,0 +1,374 @@ +package api + +import ( + "context" + "net/http" +) + +// TokenAuth is used to perform token backend operations on Vault +type TokenAuth struct { + c *Client +} + +// Token is used to return the client for token-backend API calls +func (a *Auth) Token() *TokenAuth { + return &TokenAuth{c: a.c} +} + +func (c *TokenAuth) Create(opts *TokenCreateRequest) (*Secret, error) { + return c.CreateWithContext(context.Background(), opts) +} + +func (c *TokenAuth) 
CreateWithContext(ctx context.Context, opts *TokenCreateRequest) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/create") + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) CreateOrphan(opts *TokenCreateRequest) (*Secret, error) { + return c.CreateOrphanWithContext(context.Background(), opts) +} + +func (c *TokenAuth) CreateOrphanWithContext(ctx context.Context, opts *TokenCreateRequest) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/create-orphan") + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) CreateWithRole(opts *TokenCreateRequest, roleName string) (*Secret, error) { + return c.CreateWithRoleWithContext(context.Background(), opts, roleName) +} + +func (c *TokenAuth) CreateWithRoleWithContext(ctx context.Context, opts *TokenCreateRequest, roleName string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/create/"+roleName) + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) Lookup(token string) (*Secret, error) { + return c.LookupWithContext(context.Background(), token) +} + +func (c *TokenAuth) LookupWithContext(ctx context.Context, token string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/lookup") + if err := r.SetJSONBody(map[string]interface{}{ + "token": token, + }); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) LookupAccessor(accessor string) (*Secret, error) { + return c.LookupAccessorWithContext(context.Background(), accessor) +} + +func (c *TokenAuth) LookupAccessorWithContext(ctx context.Context, accessor string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/lookup-accessor") + if err := r.SetJSONBody(map[string]interface{}{ + "accessor": accessor, + }); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) LookupSelf() (*Secret, error) { + return c.LookupSelfWithContext(context.Background()) +} + +func (c *TokenAuth) LookupSelfWithContext(ctx context.Context) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/auth/token/lookup-self") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c 
*TokenAuth) RenewAccessor(accessor string, increment int) (*Secret, error) { + return c.RenewAccessorWithContext(context.Background(), accessor, increment) +} + +func (c *TokenAuth) RenewAccessorWithContext(ctx context.Context, accessor string, increment int) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/renew-accessor") + if err := r.SetJSONBody(map[string]interface{}{ + "accessor": accessor, + "increment": increment, + }); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) Renew(token string, increment int) (*Secret, error) { + return c.RenewWithContext(context.Background(), token, increment) +} + +func (c *TokenAuth) RenewWithContext(ctx context.Context, token string, increment int) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/renew") + if err := r.SetJSONBody(map[string]interface{}{ + "token": token, + "increment": increment, + }); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) RenewSelf(increment int) (*Secret, error) { + return c.RenewSelfWithContext(context.Background(), increment) +} + +func (c *TokenAuth) RenewSelfWithContext(ctx context.Context, increment int) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/renew-self") + + body := map[string]interface{}{"increment": increment} + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +// RenewTokenAsSelf wraps RenewTokenAsSelfWithContext using context.Background. +func (c *TokenAuth) RenewTokenAsSelf(token string, increment int) (*Secret, error) { + return c.RenewTokenAsSelfWithContext(context.Background(), token, increment) +} + +// RenewTokenAsSelfWithContext behaves like renew-self, but authenticates using a provided +// token instead of the token attached to the client. +func (c *TokenAuth) RenewTokenAsSelfWithContext(ctx context.Context, token string, increment int) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/renew-self") + r.ClientToken = token + + body := map[string]interface{}{"increment": increment} + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +// RevokeAccessor wraps RevokeAccessorWithContext using context.Background. +func (c *TokenAuth) RevokeAccessor(accessor string) error { + return c.RevokeAccessorWithContext(context.Background(), accessor) +} + +// RevokeAccessorWithContext revokes a token associated with the given accessor +// along with all the child tokens. 
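+//
+// A minimal usage sketch (the accessor value below is a hypothetical
+// placeholder, not output from a real Vault server):
+//
+//	if err := client.Auth().Token().RevokeAccessorWithContext(ctx, "hvs-accessor-example"); err != nil {
+//		log.Fatalf("failed to revoke token by accessor: %v", err)
+//	}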
+func (c *TokenAuth) RevokeAccessorWithContext(ctx context.Context, accessor string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/revoke-accessor") + if err := r.SetJSONBody(map[string]interface{}{ + "accessor": accessor, + }); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// RevokeOrphan wraps RevokeOrphanWithContext using context.Background. +func (c *TokenAuth) RevokeOrphan(token string) error { + return c.RevokeOrphanWithContext(context.Background(), token) +} + +// RevokeOrphanWithContext revokes a token without revoking the tree underneath it (so +// child tokens are orphaned rather than revoked) +func (c *TokenAuth) RevokeOrphanWithContext(ctx context.Context, token string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/revoke-orphan") + if err := r.SetJSONBody(map[string]interface{}{ + "token": token, + }); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// RevokeSelf wraps RevokeSelfWithContext using context.Background. +func (c *TokenAuth) RevokeSelf(token string) error { + return c.RevokeSelfWithContext(context.Background(), token) +} + +// RevokeSelfWithContext revokes the token making the call. The `token` parameter is kept +// for backwards compatibility but is ignored; only the client's set token has +// an effect. +func (c *TokenAuth) RevokeSelfWithContext(ctx context.Context, token string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/revoke-self") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// RevokeTree wraps RevokeTreeWithContext using context.Background. +func (c *TokenAuth) RevokeTree(token string) error { + return c.RevokeTreeWithContext(context.Background(), token) +} + +// RevokeTreeWithContext is the "normal" revoke operation that revokes the given token and +// the entire tree underneath -- all of its child tokens, their child tokens, +// etc. +func (c *TokenAuth) RevokeTreeWithContext(ctx context.Context, token string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/revoke") + if err := r.SetJSONBody(map[string]interface{}{ + "token": token, + }); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// TokenCreateRequest is the options structure for creating a token. 
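+//
+// A hedged example of a request body (all field values are illustrative only):
+//
+//	req := &api.TokenCreateRequest{
+//		Policies:    []string{"default", "my-app-policy"},
+//		TTL:         "1h",
+//		NumUses:     10,
+//		DisplayName: "my-app-token",
+//	}
+//	secret, err := client.Auth().Token().CreateWithContext(ctx, req)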
+type TokenCreateRequest struct { + ID string `json:"id,omitempty"` + Policies []string `json:"policies,omitempty"` + Metadata map[string]string `json:"meta,omitempty"` + Lease string `json:"lease,omitempty"` + TTL string `json:"ttl,omitempty"` + ExplicitMaxTTL string `json:"explicit_max_ttl,omitempty"` + Period string `json:"period,omitempty"` + NoParent bool `json:"no_parent,omitempty"` + NoDefaultPolicy bool `json:"no_default_policy,omitempty"` + DisplayName string `json:"display_name"` + NumUses int `json:"num_uses"` + Renewable *bool `json:"renewable,omitempty"` + Type string `json:"type"` + EntityAlias string `json:"entity_alias"` +} diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go new file mode 100644 index 00000000000..c6843348e58 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/client.go @@ -0,0 +1,1795 @@ +package api + +import ( + "context" + "crypto/hmac" + "crypto/sha256" + "crypto/tls" + "encoding/base64" + "encoding/hex" + "fmt" + "net" + "net/http" + "net/url" + "os" + "path" + "strconv" + "strings" + "sync" + "time" + "unicode" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-retryablehttp" + "github.com/hashicorp/go-rootcerts" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "golang.org/x/net/http2" + "golang.org/x/time/rate" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + EnvVaultAddress = "VAULT_ADDR" + EnvVaultAgentAddr = "VAULT_AGENT_ADDR" + EnvVaultCACert = "VAULT_CACERT" + EnvVaultCACertBytes = "VAULT_CACERT_BYTES" + EnvVaultCAPath = "VAULT_CAPATH" + EnvVaultClientCert = "VAULT_CLIENT_CERT" + EnvVaultClientKey = "VAULT_CLIENT_KEY" + EnvVaultClientTimeout = "VAULT_CLIENT_TIMEOUT" + EnvVaultSRVLookup = "VAULT_SRV_LOOKUP" + EnvVaultSkipVerify = "VAULT_SKIP_VERIFY" + EnvVaultNamespace = "VAULT_NAMESPACE" + EnvVaultTLSServerName = "VAULT_TLS_SERVER_NAME" + EnvVaultWrapTTL = "VAULT_WRAP_TTL" + EnvVaultMaxRetries = "VAULT_MAX_RETRIES" + EnvVaultToken = "VAULT_TOKEN" + EnvVaultMFA = "VAULT_MFA" + EnvRateLimit = "VAULT_RATE_LIMIT" + EnvHTTPProxy = "VAULT_HTTP_PROXY" + EnvVaultProxyAddr = "VAULT_PROXY_ADDR" + EnvVaultDisableRedirects = "VAULT_DISABLE_REDIRECTS" + HeaderIndex = "X-Vault-Index" + HeaderForward = "X-Vault-Forward" + HeaderInconsistent = "X-Vault-Inconsistent" + TLSErrorString = "This error usually means that the server is running with TLS disabled\n" + + "but the client is configured to use TLS. Please either enable TLS\n" + + "on the server or run the client with -address set to an address\n" + + "that uses the http protocol:\n\n" + + " vault -address http://
\n\n" + + "You can also set the VAULT_ADDR environment variable:\n\n\n" + + " VAULT_ADDR=http://
vault \n\n" + + "where
is replaced by the actual address to the server." +) + +// Deprecated values +const ( + EnvVaultAgentAddress = "VAULT_AGENT_ADDR" + EnvVaultInsecure = "VAULT_SKIP_VERIFY" +) + +// WrappingLookupFunc is a function that, given an HTTP verb and a path, +// returns an optional string duration to be used for response wrapping (e.g. +// "15s", or simply "15"). The path will not begin with "/v1/" or "v1/" or "/", +// however, end-of-path forward slashes are not trimmed, so must match your +// called path precisely. Response wrapping will only be used when the return +// value is not the empty string. +type WrappingLookupFunc func(operation, path string) string + +// Config is used to configure the creation of the client. +type Config struct { + modifyLock sync.RWMutex + + // Address is the address of the Vault server. This should be a complete + // URL such as "http://vault.example.com". If you need a custom SSL + // cert or want to enable insecure mode, you need to specify a custom + // HttpClient. + Address string + + // AgentAddress is the address of the local Vault agent. This should be a + // complete URL such as "http://vault.example.com". + AgentAddress string + + // HttpClient is the HTTP client to use. Vault sets sane defaults for the + // http.Client and its associated http.Transport created in DefaultConfig. + // If you must modify Vault's defaults, it is suggested that you start with + // that client and modify as needed rather than start with an empty client + // (or http.DefaultClient). + HttpClient *http.Client + + // MinRetryWait controls the minimum time to wait before retrying when a 5xx + // error occurs. Defaults to 1000 milliseconds. + MinRetryWait time.Duration + + // MaxRetryWait controls the maximum time to wait before retrying when a 5xx + // error occurs. Defaults to 1500 milliseconds. + MaxRetryWait time.Duration + + // MaxRetries controls the maximum number of times to retry when a 5xx + // error occurs. Set to 0 to disable retrying. Defaults to 2 (for a total + // of three tries). + MaxRetries int + + // Timeout is for setting custom timeout parameter in the HttpClient + Timeout time.Duration + + // If there is an error when creating the configuration, this will be the + // error + Error error + + // The Backoff function to use; a default is used if not provided + Backoff retryablehttp.Backoff + + // The CheckRetry function to use; a default is used if not provided + CheckRetry retryablehttp.CheckRetry + + // Logger is the leveled logger to provide to the retryable HTTP client. + Logger retryablehttp.LeveledLogger + + // Limiter is the rate limiter used by the client. + // If this pointer is nil, then there will be no limit set. + // In contrast, if this pointer is set, even to an empty struct, + // then that limiter will be used. Note that an empty Limiter + // is equivalent blocking all events. + Limiter *rate.Limiter + + // OutputCurlString causes the actual request to return an error of type + // *OutputStringError. Type asserting the error message will allow + // fetching a cURL-compatible string for the operation. + // + // Note: It is not thread-safe to set this and make concurrent requests + // with the same client. Cloning a client will not clone this value. + OutputCurlString bool + + // OutputPolicy causes the actual request to return an error of type + // *OutputPolicyError. Type asserting the error message will display + // an example of the required policy HCL needed for the operation. 
+ // + // Note: It is not thread-safe to set this and make concurrent requests + // with the same client. Cloning a client will not clone this value. + OutputPolicy bool + + // curlCACert, curlCAPath, curlClientCert and curlClientKey are used to keep + // track of the name of the TLS certs and keys when OutputCurlString is set. + // Cloning a client will also not clone those values. + curlCACert, curlCAPath string + curlClientCert, curlClientKey string + + // SRVLookup enables the client to lookup the host through DNS SRV lookup + SRVLookup bool + + // CloneHeaders ensures that the source client's headers are copied to + // its clone. + CloneHeaders bool + + // CloneToken from parent. + CloneToken bool + + // ReadYourWrites ensures isolated read-after-write semantics by + // providing discovered cluster replication states in each request. + // The shared state is automatically propagated to all Client clones. + // + // Note: Careful consideration should be made prior to enabling this setting + // since there will be a performance penalty paid upon each request. + // This feature requires Enterprise server-side. + ReadYourWrites bool + + // DisableRedirects when set to true, will prevent the client from + // automatically following a (single) redirect response to its initial + // request. This behavior may be desirable if using Vault CLI on the server + // side. + // + // Note: Disabling redirect following behavior could cause issues with + // commands such as 'vault operator raft snapshot' as this redirects to the + // primary node. + DisableRedirects bool +} + +// TLSConfig contains the parameters needed to configure TLS on the HTTP client +// used to communicate with Vault. +type TLSConfig struct { + // CACert is the path to a PEM-encoded CA cert file to use to verify the + // Vault server SSL certificate. It takes precedence over CACertBytes + // and CAPath. + CACert string + + // CACertBytes is a PEM-encoded certificate or bundle. It takes precedence + // over CAPath. + CACertBytes []byte + + // CAPath is the path to a directory of PEM-encoded CA cert files to verify + // the Vault server SSL certificate. + CAPath string + + // ClientCert is the path to the certificate for Vault communication + ClientCert string + + // ClientKey is the path to the private key for Vault communication + ClientKey string + + // TLSServerName, if set, is used to set the SNI host when connecting via + // TLS. + TLSServerName string + + // Insecure enables or disables SSL verification + Insecure bool +} + +// DefaultConfig returns a default configuration for the client. It is +// safe to modify the return value of this function. +// +// The default Address is https://127.0.0.1:8200, but this can be overridden by +// setting the `VAULT_ADDR` environment variable. +// +// If an error is encountered, the Error field on the returned *Config will be populated with the specific error. 
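+//
+// A typical construction sketch (error handling shortened for brevity):
+//
+//	config := api.DefaultConfig()     // reads VAULT_ADDR etc. from the environment
+//	config.Timeout = 30 * time.Second // optional: override the 60s default
+//	client, err := api.NewClient(config)
+//	if err != nil {
+//		log.Fatal(err)
+//	}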
+func DefaultConfig() *Config { + config := &Config{ + Address: "https://127.0.0.1:8200", + HttpClient: cleanhttp.DefaultPooledClient(), + Timeout: time.Second * 60, + MinRetryWait: time.Millisecond * 1000, + MaxRetryWait: time.Millisecond * 1500, + MaxRetries: 2, + Backoff: retryablehttp.LinearJitterBackoff, + } + + transport := config.HttpClient.Transport.(*http.Transport) + transport.TLSHandshakeTimeout = 10 * time.Second + transport.TLSClientConfig = &tls.Config{ + MinVersion: tls.VersionTLS12, + } + if err := http2.ConfigureTransport(transport); err != nil { + config.Error = err + return config + } + + if err := config.ReadEnvironment(); err != nil { + config.Error = err + return config + } + + // Ensure redirects are not automatically followed + // Note that this is sane for the API client as it has its own + // redirect handling logic (and thus also for command/meta), + // but in e.g. http_test actual redirect handling is necessary + config.HttpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { + // Returning this value causes the Go net library to not close the + // response body and to nil out the error. Otherwise retry clients may + // try three times on every redirect because it sees an error from this + // function (to prevent redirects) passing through to it. + return http.ErrUseLastResponse + } + + return config +} + +// configureTLS is a lock free version of ConfigureTLS that can be used in +// ReadEnvironment where the lock is already hold +func (c *Config) configureTLS(t *TLSConfig) error { + if c.HttpClient == nil { + c.HttpClient = DefaultConfig().HttpClient + } + clientTLSConfig := c.HttpClient.Transport.(*http.Transport).TLSClientConfig + + var clientCert tls.Certificate + foundClientCert := false + + switch { + case t.ClientCert != "" && t.ClientKey != "": + var err error + clientCert, err = tls.LoadX509KeyPair(t.ClientCert, t.ClientKey) + if err != nil { + return err + } + foundClientCert = true + c.curlClientCert = t.ClientCert + c.curlClientKey = t.ClientKey + case t.ClientCert != "" || t.ClientKey != "": + return fmt.Errorf("both client cert and client key must be provided") + } + + if t.CACert != "" || len(t.CACertBytes) != 0 || t.CAPath != "" { + c.curlCACert = t.CACert + c.curlCAPath = t.CAPath + rootConfig := &rootcerts.Config{ + CAFile: t.CACert, + CACertificate: t.CACertBytes, + CAPath: t.CAPath, + } + if err := rootcerts.ConfigureTLS(clientTLSConfig, rootConfig); err != nil { + return err + } + } + + if t.Insecure { + clientTLSConfig.InsecureSkipVerify = true + } + + if foundClientCert { + // We use this function to ignore the server's preferential list of + // CAs, otherwise any CA used for the cert auth backend must be in the + // server's CA pool + clientTLSConfig.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { + return &clientCert, nil + } + } + + if t.TLSServerName != "" { + clientTLSConfig.ServerName = t.TLSServerName + } + + return nil +} + +// ConfigureTLS takes a set of TLS configurations and applies those to the +// HTTP client. +func (c *Config) ConfigureTLS(t *TLSConfig) error { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + return c.configureTLS(t) +} + +// ReadEnvironment reads configuration information from the environment. If +// there is an error, no configuration value is updated. 
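+//
+// For example, with the environment prepared as below, DefaultConfig (which
+// calls ReadEnvironment internally) will pick up the address and timeout;
+// the values shown are illustrative:
+//
+//	os.Setenv("VAULT_ADDR", "https://vault.example.com:8200")
+//	os.Setenv("VAULT_CLIENT_TIMEOUT", "90") // parsed as 90 seconds
+//	config := api.DefaultConfig()           // config.Address and config.Timeout now reflect the env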
+func (c *Config) ReadEnvironment() error { + var envAddress string + var envAgentAddress string + var envCACert string + var envCACertBytes []byte + var envCAPath string + var envClientCert string + var envClientKey string + var envClientTimeout time.Duration + var envInsecure bool + var envTLSServerName string + var envMaxRetries *uint64 + var envSRVLookup bool + var limit *rate.Limiter + var envVaultProxy string + var envVaultDisableRedirects bool + + // Parse the environment variables + if v := os.Getenv(EnvVaultAddress); v != "" { + envAddress = v + } + if v := os.Getenv(EnvVaultAgentAddr); v != "" { + envAgentAddress = v + } + if v := os.Getenv(EnvVaultMaxRetries); v != "" { + maxRetries, err := strconv.ParseUint(v, 10, 32) + if err != nil { + return err + } + envMaxRetries = &maxRetries + } + if v := os.Getenv(EnvVaultCACert); v != "" { + envCACert = v + } + if v := os.Getenv(EnvVaultCACertBytes); v != "" { + envCACertBytes = []byte(v) + } + if v := os.Getenv(EnvVaultCAPath); v != "" { + envCAPath = v + } + if v := os.Getenv(EnvVaultClientCert); v != "" { + envClientCert = v + } + if v := os.Getenv(EnvVaultClientKey); v != "" { + envClientKey = v + } + if v := os.Getenv(EnvRateLimit); v != "" { + rateLimit, burstLimit, err := parseRateLimit(v) + if err != nil { + return err + } + limit = rate.NewLimiter(rate.Limit(rateLimit), burstLimit) + } + if t := os.Getenv(EnvVaultClientTimeout); t != "" { + clientTimeout, err := parseutil.ParseDurationSecond(t) + if err != nil { + return fmt.Errorf("could not parse %q", EnvVaultClientTimeout) + } + envClientTimeout = clientTimeout + } + if v := os.Getenv(EnvVaultSkipVerify); v != "" { + var err error + envInsecure, err = strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("could not parse %s", EnvVaultSkipVerify) + } + } + if v := os.Getenv(EnvVaultSRVLookup); v != "" { + var err error + envSRVLookup, err = strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("could not parse %s", EnvVaultSRVLookup) + } + } + + if v := os.Getenv(EnvVaultTLSServerName); v != "" { + envTLSServerName = v + } + + if v := os.Getenv(EnvHTTPProxy); v != "" { + envVaultProxy = v + } + + // VAULT_PROXY_ADDR supersedes VAULT_HTTP_PROXY + if v := os.Getenv(EnvVaultProxyAddr); v != "" { + envVaultProxy = v + } + + if v := os.Getenv(EnvVaultDisableRedirects); v != "" { + var err error + envVaultDisableRedirects, err = strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("could not parse %s", EnvVaultDisableRedirects) + } + + c.DisableRedirects = envVaultDisableRedirects + } + + // Configure the HTTP clients TLS configuration. 
+ t := &TLSConfig{ + CACert: envCACert, + CACertBytes: envCACertBytes, + CAPath: envCAPath, + ClientCert: envClientCert, + ClientKey: envClientKey, + TLSServerName: envTLSServerName, + Insecure: envInsecure, + } + + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + c.SRVLookup = envSRVLookup + c.Limiter = limit + + if err := c.configureTLS(t); err != nil { + return err + } + + if envAddress != "" { + c.Address = envAddress + } + + if envAgentAddress != "" { + c.AgentAddress = envAgentAddress + } + + if envMaxRetries != nil { + c.MaxRetries = int(*envMaxRetries) + } + + if envClientTimeout != 0 { + c.Timeout = envClientTimeout + } + + if envVaultProxy != "" { + u, err := url.Parse(envVaultProxy) + if err != nil { + return err + } + + transport := c.HttpClient.Transport.(*http.Transport) + transport.Proxy = http.ProxyURL(u) + } + + return nil +} + +// ParseAddress transforms the provided address into a url.URL and handles +// the case of Unix domain sockets by setting the DialContext in the +// configuration's HttpClient.Transport. This function must be called with +// c.modifyLock held for write access. +func (c *Config) ParseAddress(address string) (*url.URL, error) { + u, err := url.Parse(address) + if err != nil { + return nil, err + } + + c.Address = address + + if strings.HasPrefix(address, "unix://") { + // When the address begins with unix://, always change the transport's + // DialContext (to match previous behaviour) + socket := strings.TrimPrefix(address, "unix://") + + if transport, ok := c.HttpClient.Transport.(*http.Transport); ok { + transport.DialContext = func(context.Context, string, string) (net.Conn, error) { + return net.Dial("unix", socket) + } + + // Since the address points to a unix domain socket, the scheme in the + // *URL would be set to `unix`. The *URL in the client is expected to + // be pointing to the protocol used in the application layer and not to + // the transport layer. Hence, setting the fields accordingly. + u.Scheme = "http" + u.Host = socket + u.Path = "" + } else { + return nil, fmt.Errorf("attempting to specify unix:// address with non-transport transport") + } + } else if strings.HasPrefix(c.Address, "unix://") { + // When the address being set does not begin with unix:// but the previous + // address in the Config did, change the transport's DialContext back to + // use the default configuration that cleanhttp uses. + + if transport, ok := c.HttpClient.Transport.(*http.Transport); ok { + transport.DialContext = cleanhttp.DefaultPooledTransport().DialContext + } + } + + return u, nil +} + +func parseRateLimit(val string) (rate float64, burst int, err error) { + _, err = fmt.Sscanf(val, "%f:%d", &rate, &burst) + if err != nil { + rate, err = strconv.ParseFloat(val, 64) + if err != nil { + err = fmt.Errorf("%v was provided but incorrectly formatted", EnvRateLimit) + } + burst = int(rate) + } + + return rate, burst, err +} + +// Client is the client to the Vault API. Create a client with NewClient. +type Client struct { + modifyLock sync.RWMutex + addr *url.URL + config *Config + token string + headers http.Header + wrappingLookupFunc WrappingLookupFunc + mfaCreds []string + policyOverride bool + requestCallbacks []RequestCallback + responseCallbacks []ResponseCallback + replicationStateStore *replicationStateStore +} + +// NewClient returns a new client for the given configuration. +// +// If the configuration is nil, Vault will use configuration from +// DefaultConfig(), which is the recommended starting configuration. 
+// +// If the environment variable `VAULT_TOKEN` is present, the token will be +// automatically added to the client. Otherwise, you must manually call +// `SetToken()`. +func NewClient(c *Config) (*Client, error) { + def := DefaultConfig() + if def == nil { + return nil, fmt.Errorf("could not create/read default configuration") + } + if def.Error != nil { + return nil, errwrap.Wrapf("error encountered setting up default configuration: {{err}}", def.Error) + } + + if c == nil { + c = def + } + + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + if c.MinRetryWait == 0 { + c.MinRetryWait = def.MinRetryWait + } + + if c.MaxRetryWait == 0 { + c.MaxRetryWait = def.MaxRetryWait + } + + if c.HttpClient == nil { + c.HttpClient = def.HttpClient + } + if c.HttpClient.Transport == nil { + c.HttpClient.Transport = def.HttpClient.Transport + } + + address := c.Address + if c.AgentAddress != "" { + address = c.AgentAddress + } + + u, err := c.ParseAddress(address) + if err != nil { + return nil, err + } + + client := &Client{ + addr: u, + config: c, + headers: make(http.Header), + } + + if c.ReadYourWrites { + client.replicationStateStore = &replicationStateStore{} + } + + // Add the VaultRequest SSRF protection header + client.headers[consts.RequestHeaderName] = []string{"true"} + + if token := os.Getenv(EnvVaultToken); token != "" { + client.token = token + } + + if namespace := os.Getenv(EnvVaultNamespace); namespace != "" { + client.setNamespace(namespace) + } + + return client, nil +} + +func (c *Client) CloneConfig() *Config { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + newConfig := DefaultConfig() + newConfig.Address = c.config.Address + newConfig.AgentAddress = c.config.AgentAddress + newConfig.MinRetryWait = c.config.MinRetryWait + newConfig.MaxRetryWait = c.config.MaxRetryWait + newConfig.MaxRetries = c.config.MaxRetries + newConfig.Timeout = c.config.Timeout + newConfig.Backoff = c.config.Backoff + newConfig.CheckRetry = c.config.CheckRetry + newConfig.Logger = c.config.Logger + newConfig.Limiter = c.config.Limiter + newConfig.SRVLookup = c.config.SRVLookup + newConfig.CloneHeaders = c.config.CloneHeaders + newConfig.CloneToken = c.config.CloneToken + newConfig.ReadYourWrites = c.config.ReadYourWrites + + // we specifically want a _copy_ of the client here, not a pointer to the original one + newClient := *c.config.HttpClient + newConfig.HttpClient = &newClient + + return newConfig +} + +// SetAddress sets the address of Vault in the client. The format of address should be +// "://:". Setting this on a client will override the +// value of VAULT_ADDR environment variable. +func (c *Client) SetAddress(addr string) error { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + parsedAddr, err := c.config.ParseAddress(addr) + if err != nil { + return errwrap.Wrapf("failed to set address: {{err}}", err) + } + + c.addr = parsedAddr + return nil +} + +// Address returns the Vault URL the client is configured to connect to +func (c *Client) Address() string { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + return c.addr.String() +} + +func (c *Client) SetCheckRedirect(f func(*http.Request, []*http.Request) error) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.HttpClient.CheckRedirect = f +} + +// SetLimiter will set the rate limiter for this client. +// This method is thread-safe. 
+// rateLimit and burst are specified according to https://godoc.org/golang.org/x/time/rate#NewLimiter +func (c *Client) SetLimiter(rateLimit float64, burst int) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.Limiter = rate.NewLimiter(rate.Limit(rateLimit), burst) +} + +func (c *Client) Limiter() *rate.Limiter { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.Limiter +} + +// SetMinRetryWait sets the minimum time to wait before retrying in the case of certain errors. +func (c *Client) SetMinRetryWait(retryWait time.Duration) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.MinRetryWait = retryWait +} + +func (c *Client) MinRetryWait() time.Duration { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.MinRetryWait +} + +// SetMaxRetryWait sets the maximum time to wait before retrying in the case of certain errors. +func (c *Client) SetMaxRetryWait(retryWait time.Duration) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.MaxRetryWait = retryWait +} + +func (c *Client) MaxRetryWait() time.Duration { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.MaxRetryWait +} + +// SetMaxRetries sets the number of retries that will be used in the case of certain errors +func (c *Client) SetMaxRetries(retries int) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.MaxRetries = retries +} + +func (c *Client) SetMaxIdleConnections(idle int) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.HttpClient.Transport.(*http.Transport).MaxIdleConns = idle +} + +func (c *Client) MaxIdleConnections() int { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + return c.config.HttpClient.Transport.(*http.Transport).MaxIdleConns +} + +func (c *Client) SetDisableKeepAlives(disable bool) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.HttpClient.Transport.(*http.Transport).DisableKeepAlives = disable +} + +func (c *Client) DisableKeepAlives() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.HttpClient.Transport.(*http.Transport).DisableKeepAlives +} + +func (c *Client) MaxRetries() int { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.MaxRetries +} + +func (c *Client) SetSRVLookup(srv bool) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.SRVLookup = srv +} + +func (c *Client) SRVLookup() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.SRVLookup +} + +// SetCheckRetry sets the CheckRetry function to be 
used for future requests. +func (c *Client) SetCheckRetry(checkRetry retryablehttp.CheckRetry) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.CheckRetry = checkRetry +} + +func (c *Client) CheckRetry() retryablehttp.CheckRetry { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.CheckRetry +} + +// SetClientTimeout sets the client request timeout +func (c *Client) SetClientTimeout(timeout time.Duration) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.Timeout = timeout +} + +func (c *Client) ClientTimeout() time.Duration { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.Timeout +} + +func (c *Client) OutputCurlString() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.OutputCurlString +} + +func (c *Client) SetOutputCurlString(curl bool) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.OutputCurlString = curl +} + +func (c *Client) OutputPolicy() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.OutputPolicy +} + +func (c *Client) SetOutputPolicy(isSet bool) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.OutputPolicy = isSet +} + +// CurrentWrappingLookupFunc sets a lookup function that returns desired wrap TTLs +// for a given operation and path. +func (c *Client) CurrentWrappingLookupFunc() WrappingLookupFunc { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + return c.wrappingLookupFunc +} + +// SetWrappingLookupFunc sets a lookup function that returns desired wrap TTLs +// for a given operation and path. +func (c *Client) SetWrappingLookupFunc(lookupFunc WrappingLookupFunc) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.wrappingLookupFunc = lookupFunc +} + +// SetMFACreds sets the MFA credentials supplied either via the environment +// variable or via the command line. +func (c *Client) SetMFACreds(creds []string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.mfaCreds = creds +} + +// SetNamespace sets the namespace supplied either via the environment +// variable or via the command line. +func (c *Client) SetNamespace(namespace string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.setNamespace(namespace) +} + +func (c *Client) setNamespace(namespace string) { + if c.headers == nil { + c.headers = make(http.Header) + } + + c.headers.Set(consts.NamespaceHeaderName, namespace) +} + +// ClearNamespace removes the namespace header if set. +func (c *Client) ClearNamespace() { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + if c.headers != nil { + c.headers.Del(consts.NamespaceHeaderName) + } +} + +// Namespace returns the namespace currently set in this client. It will +// return an empty string if there is no namespace set. 
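+//
+// A short sketch of namespace handling (the namespace name is hypothetical;
+// namespaces require Vault Enterprise):
+//
+//	client.SetNamespace("admin/team-a")
+//	fmt.Println(client.Namespace()) // "admin/team-a"
+//	client.ClearNamespace()
+//	fmt.Println(client.Namespace()) // ""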
+func (c *Client) Namespace() string { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + if c.headers == nil { + return "" + } + return c.headers.Get(consts.NamespaceHeaderName) +} + +// WithNamespace makes a shallow copy of Client, modifies it to use +// the given namespace, and returns it. Passing an empty string will +// temporarily unset the namespace. +func (c *Client) WithNamespace(namespace string) *Client { + c2 := *c + c2.modifyLock = sync.RWMutex{} + c2.headers = c.Headers() + if namespace == "" { + c2.ClearNamespace() + } else { + c2.SetNamespace(namespace) + } + return &c2 +} + +// Token returns the access token being used by this client. It will +// return the empty string if there is no token set. +func (c *Client) Token() string { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + return c.token +} + +// SetToken sets the token directly. This won't perform any auth +// verification, it simply sets the token properly for future requests. +func (c *Client) SetToken(v string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.token = v +} + +// ClearToken deletes the token if it is set or does nothing otherwise. +func (c *Client) ClearToken() { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.token = "" +} + +// Headers gets the current set of headers used for requests. This returns a +// copy; to modify it call AddHeader or SetHeaders. +func (c *Client) Headers() http.Header { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + if c.headers == nil { + return nil + } + + ret := make(http.Header) + for k, v := range c.headers { + for _, val := range v { + ret[k] = append(ret[k], val) + } + } + + return ret +} + +// AddHeader allows a single header key/value pair to be added +// in a race-safe fashion. +func (c *Client) AddHeader(key, value string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.headers.Add(key, value) +} + +// SetHeaders clears all previous headers and uses only the given +// ones going forward. +func (c *Client) SetHeaders(headers http.Header) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.headers = headers +} + +// SetBackoff sets the backoff function to be used for future requests. +func (c *Client) SetBackoff(backoff retryablehttp.Backoff) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.Backoff = backoff +} + +func (c *Client) SetLogger(logger retryablehttp.LeveledLogger) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.Logger = logger +} + +// SetCloneHeaders to allow headers to be copied whenever the client is cloned. +func (c *Client) SetCloneHeaders(cloneHeaders bool) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.CloneHeaders = cloneHeaders +} + +// CloneHeaders gets the configured CloneHeaders value. +func (c *Client) CloneHeaders() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.CloneHeaders +} + +// SetCloneToken from parent +func (c *Client) SetCloneToken(cloneToken bool) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.CloneToken = cloneToken +} + +// CloneToken gets the configured CloneToken value. 
+func (c *Client) CloneToken() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.CloneToken +} + +// SetReadYourWrites to prevent reading stale cluster replication state. +func (c *Client) SetReadYourWrites(preventStaleReads bool) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + if preventStaleReads { + if c.replicationStateStore == nil { + c.replicationStateStore = &replicationStateStore{} + } + } else { + c.replicationStateStore = nil + } + + c.config.ReadYourWrites = preventStaleReads +} + +// ReadYourWrites gets the configured value of ReadYourWrites +func (c *Client) ReadYourWrites() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.ReadYourWrites +} + +// Clone creates a new client with the same configuration. Note that the same +// underlying http.Client is used; modifying the client from more than one +// goroutine at once may not be safe, so modify the client as needed and then +// clone. The headers are cloned based on the CloneHeaders property of the +// source config +// +// Also, only the client's config is currently copied; this means items not in +// the api.Config struct, such as policy override and wrapping function +// behavior, must currently then be set as desired on the new client. +func (c *Client) Clone() (*Client, error) { + return c.clone(c.config.CloneHeaders) +} + +// CloneWithHeaders creates a new client similar to Clone, with the difference +// being that the headers are always cloned +func (c *Client) CloneWithHeaders() (*Client, error) { + return c.clone(true) +} + +// clone creates a new client, with the headers being cloned based on the +// passed in cloneheaders boolean +func (c *Client) clone(cloneHeaders bool) (*Client, error) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + config := c.config + config.modifyLock.RLock() + defer config.modifyLock.RUnlock() + + newConfig := &Config{ + Address: config.Address, + HttpClient: config.HttpClient, + MinRetryWait: config.MinRetryWait, + MaxRetryWait: config.MaxRetryWait, + MaxRetries: config.MaxRetries, + Timeout: config.Timeout, + Backoff: config.Backoff, + CheckRetry: config.CheckRetry, + Logger: config.Logger, + Limiter: config.Limiter, + AgentAddress: config.AgentAddress, + SRVLookup: config.SRVLookup, + CloneHeaders: config.CloneHeaders, + CloneToken: config.CloneToken, + ReadYourWrites: config.ReadYourWrites, + } + client, err := NewClient(newConfig) + if err != nil { + return nil, err + } + + if cloneHeaders { + client.SetHeaders(c.Headers().Clone()) + } + + if config.CloneToken { + client.SetToken(c.token) + } + + client.replicationStateStore = c.replicationStateStore + + return client, nil +} + +// SetPolicyOverride sets whether requests should be sent with the policy +// override flag to request overriding soft-mandatory Sentinel policies (both +// RGPs and EGPs) +func (c *Client) SetPolicyOverride(override bool) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.policyOverride = override +} + +// NewRequest creates a new raw request object to query the Vault server +// configured for this client. This is an advanced method and generally +// doesn't need to be called externally. 
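+//
+// A hedged sketch of direct use (most callers should prefer the Logical,
+// KVv1/KVv2, or auth helpers instead; the path below is illustrative):
+//
+//	r := client.NewRequest(http.MethodGet, "/v1/sys/health")
+//	resp, err := client.RawRequestWithContext(ctx, r) // deprecated, but shows the flow
+//	if err == nil {
+//		defer resp.Body.Close()
+//	}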
+func (c *Client) NewRequest(method, requestPath string) *Request { + c.modifyLock.RLock() + addr := c.addr + token := c.token + mfaCreds := c.mfaCreds + wrappingLookupFunc := c.wrappingLookupFunc + policyOverride := c.policyOverride + c.modifyLock.RUnlock() + + host := addr.Host + // if SRV records exist (see https://tools.ietf.org/html/draft-andrews-http-srv-02), lookup the SRV + // record and take the highest match; this is not designed for high-availability, just discovery + // Internet Draft specifies that the SRV record is ignored if a port is given + if addr.Port() == "" && c.config.SRVLookup { + _, addrs, err := net.LookupSRV("http", "tcp", addr.Hostname()) + if err == nil && len(addrs) > 0 { + host = fmt.Sprintf("%s:%d", addrs[0].Target, addrs[0].Port) + } + } + + req := &Request{ + Method: method, + URL: &url.URL{ + User: addr.User, + Scheme: addr.Scheme, + Host: host, + Path: path.Join(addr.Path, requestPath), + }, + Host: addr.Host, + ClientToken: token, + Params: make(map[string][]string), + } + + var lookupPath string + switch { + case strings.HasPrefix(requestPath, "/v1/"): + lookupPath = strings.TrimPrefix(requestPath, "/v1/") + case strings.HasPrefix(requestPath, "v1/"): + lookupPath = strings.TrimPrefix(requestPath, "v1/") + default: + lookupPath = requestPath + } + + req.MFAHeaderVals = mfaCreds + + if wrappingLookupFunc != nil { + req.WrapTTL = wrappingLookupFunc(method, lookupPath) + } else { + req.WrapTTL = DefaultWrappingLookupFunc(method, lookupPath) + } + + req.Headers = c.Headers() + req.PolicyOverride = policyOverride + + return req +} + +// RawRequest performs the raw request given. This request may be against +// a Vault server not configured with this client. This is an advanced operation +// that generally won't need to be called externally. +// +// Deprecated: This method should not be used directly. Use higher level +// methods instead. +func (c *Client) RawRequest(r *Request) (*Response, error) { + return c.RawRequestWithContext(context.Background(), r) +} + +// RawRequestWithContext performs the raw request given. This request may be against +// a Vault server not configured with this client. This is an advanced operation +// that generally won't need to be called externally. +// +// Deprecated: This method should not be used directly. Use higher level +// methods instead. +func (c *Client) RawRequestWithContext(ctx context.Context, r *Request) (*Response, error) { + // Note: we purposefully do not call cancel manually. The reason is + // when canceled, the request.Body will EOF when reading due to the way + // it streams data in. Cancel will still be run when the timeout is + // hit, so this doesn't really harm anything. 
+ ctx, _ = c.withConfiguredTimeout(ctx) + return c.rawRequestWithContext(ctx, r) +} + +func (c *Client) rawRequestWithContext(ctx context.Context, r *Request) (*Response, error) { + c.modifyLock.RLock() + token := c.token + + c.config.modifyLock.RLock() + limiter := c.config.Limiter + minRetryWait := c.config.MinRetryWait + maxRetryWait := c.config.MaxRetryWait + maxRetries := c.config.MaxRetries + checkRetry := c.config.CheckRetry + backoff := c.config.Backoff + httpClient := c.config.HttpClient + ns := c.headers.Get(consts.NamespaceHeaderName) + outputCurlString := c.config.OutputCurlString + outputPolicy := c.config.OutputPolicy + logger := c.config.Logger + disableRedirects := c.config.DisableRedirects + c.config.modifyLock.RUnlock() + + c.modifyLock.RUnlock() + + // ensure that the most current namespace setting is used at the time of the call + // e.g. calls using (*Client).WithNamespace + switch ns { + case "": + r.Headers.Del(consts.NamespaceHeaderName) + default: + r.Headers.Set(consts.NamespaceHeaderName, ns) + } + + for _, cb := range c.requestCallbacks { + cb(r) + } + + if c.config.ReadYourWrites { + c.replicationStateStore.requireState(r) + } + + if limiter != nil { + limiter.Wait(ctx) + } + + // check the token before potentially erroring from the API + if err := validateToken(token); err != nil { + return nil, err + } + + redirectCount := 0 +START: + req, err := r.toRetryableHTTP() + if err != nil { + return nil, err + } + if req == nil { + return nil, fmt.Errorf("nil request created") + } + + if outputCurlString { + LastOutputStringError = &OutputStringError{ + Request: req, + TLSSkipVerify: c.config.HttpClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify, + ClientCert: c.config.curlClientCert, + ClientKey: c.config.curlClientKey, + ClientCACert: c.config.curlCACert, + ClientCAPath: c.config.curlCAPath, + } + return nil, LastOutputStringError + } + + if outputPolicy { + LastOutputPolicyError = &OutputPolicyError{ + method: req.Method, + path: strings.TrimPrefix(req.URL.Path, "/v1"), + } + return nil, LastOutputPolicyError + } + + req.Request = req.Request.WithContext(ctx) + + if backoff == nil { + backoff = retryablehttp.LinearJitterBackoff + } + + if checkRetry == nil { + checkRetry = DefaultRetryPolicy + } + + client := &retryablehttp.Client{ + HTTPClient: httpClient, + RetryWaitMin: minRetryWait, + RetryWaitMax: maxRetryWait, + RetryMax: maxRetries, + Backoff: backoff, + CheckRetry: checkRetry, + Logger: logger, + ErrorHandler: retryablehttp.PassthroughErrorHandler, + } + + var result *Response + resp, err := client.Do(req) + if resp != nil { + result = &Response{Response: resp} + } + if err != nil { + if strings.Contains(err.Error(), "tls: oversized") { + err = errwrap.Wrapf("{{err}}\n\n"+TLSErrorString, err) + } + return result, err + } + + // Check for a redirect, only allowing for a single redirect (if redirects aren't disabled) + if (resp.StatusCode == 301 || resp.StatusCode == 302 || resp.StatusCode == 307) && redirectCount == 0 && !disableRedirects { + // Parse the updated location + respLoc, err := resp.Location() + if err != nil { + return result, err + } + + // Ensure a protocol downgrade doesn't happen + if req.URL.Scheme == "https" && respLoc.Scheme != "https" { + return result, fmt.Errorf("redirect would cause protocol downgrade") + } + + // Update the request + r.URL = respLoc + + // Reset the request body if any + if err := r.ResetJSONBody(); err != nil { + return result, err + } + + // Retry the request + redirectCount++ + goto START + } 
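+ // The redirect (if any) has been followed at this point; run response
+ // callbacks and record replication state before surfacing any API error.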
+ + if result != nil { + for _, cb := range c.responseCallbacks { + cb(result) + } + + if c.config.ReadYourWrites { + c.replicationStateStore.recordState(result) + } + } + if err := result.Error(); err != nil { + return result, err + } + + return result, nil +} + +// httpRequestWithContext avoids the use of the go-retryable library found in RawRequestWithContext and is +// useful when making calls where a net/http client is desirable. A single redirect (status code 301, 302, +// or 307) will be followed but all retry and timeout logic is the responsibility of the caller as is +// closing the Response body. +func (c *Client) httpRequestWithContext(ctx context.Context, r *Request) (*Response, error) { + req, err := http.NewRequestWithContext(ctx, r.Method, r.URL.RequestURI(), r.Body) + if err != nil { + return nil, err + } + + c.modifyLock.RLock() + token := c.token + + c.config.modifyLock.RLock() + limiter := c.config.Limiter + httpClient := c.config.HttpClient + outputCurlString := c.config.OutputCurlString + outputPolicy := c.config.OutputPolicy + disableRedirects := c.config.DisableRedirects + + // add headers + if c.headers != nil { + for header, vals := range c.headers { + for _, val := range vals { + req.Header.Add(header, val) + } + } + // explicitly set the namespace header to current client + if ns := c.headers.Get(consts.NamespaceHeaderName); ns != "" { + r.Headers.Set(consts.NamespaceHeaderName, ns) + } + } + + c.config.modifyLock.RUnlock() + c.modifyLock.RUnlock() + + // OutputCurlString and OutputPolicy logic rely on the request type to be retryable.Request + if outputCurlString { + return nil, fmt.Errorf("output-curl-string is not implemented for this request") + } + if outputPolicy { + return nil, fmt.Errorf("output-policy is not implemented for this request") + } + + req.URL.User = r.URL.User + req.URL.Scheme = r.URL.Scheme + req.URL.Host = r.URL.Host + req.Host = r.URL.Host + + if len(r.ClientToken) != 0 { + req.Header.Set(consts.AuthHeaderName, r.ClientToken) + } + + if len(r.WrapTTL) != 0 { + req.Header.Set("X-Vault-Wrap-TTL", r.WrapTTL) + } + + if len(r.MFAHeaderVals) != 0 { + for _, mfaHeaderVal := range r.MFAHeaderVals { + req.Header.Add("X-Vault-MFA", mfaHeaderVal) + } + } + + if r.PolicyOverride { + req.Header.Set("X-Vault-Policy-Override", "true") + } + + if limiter != nil { + limiter.Wait(ctx) + } + + // check the token before potentially erroring from the API + if err := validateToken(token); err != nil { + return nil, err + } + + var result *Response + + resp, err := httpClient.Do(req) + + if resp != nil { + result = &Response{Response: resp} + } + + if err != nil { + if strings.Contains(err.Error(), "tls: oversized") { + err = errwrap.Wrapf("{{err}}\n\n"+TLSErrorString, err) + } + return result, err + } + + // Check for a redirect, only allowing for a single redirect, if redirects aren't disabled + if (resp.StatusCode == 301 || resp.StatusCode == 302 || resp.StatusCode == 307) && !disableRedirects { + // Parse the updated location + respLoc, err := resp.Location() + if err != nil { + return result, fmt.Errorf("redirect failed: %s", err) + } + + // Ensure a protocol downgrade doesn't happen + if req.URL.Scheme == "https" && respLoc.Scheme != "https" { + return result, fmt.Errorf("redirect would cause protocol downgrade") + } + + // Update the request + req.URL = respLoc + + // Reset the request body if any + if err := r.ResetJSONBody(); err != nil { + return result, fmt.Errorf("redirect failed: %s", err) + } + + // Retry the request + resp, err = 
httpClient.Do(req) + if err != nil { + return result, fmt.Errorf("redirect failed: %s", err) + } + } + + if err := result.Error(); err != nil { + return nil, err + } + + return result, nil +} + +type ( + RequestCallback func(*Request) + ResponseCallback func(*Response) +) + +// WithRequestCallbacks makes a shallow clone of Client, modifies it to use +// the given callbacks, and returns it. Each of the callbacks will be invoked +// on every outgoing request. A client may be used to issue requests +// concurrently; any locking needed by callbacks invoked concurrently is the +// callback's responsibility. +func (c *Client) WithRequestCallbacks(callbacks ...RequestCallback) *Client { + c2 := *c + c2.modifyLock = sync.RWMutex{} + c2.requestCallbacks = callbacks + return &c2 +} + +// WithResponseCallbacks makes a shallow clone of Client, modifies it to use +// the given callbacks, and returns it. Each of the callbacks will be invoked +// on every received response. A client may be used to issue requests +// concurrently; any locking needed by callbacks invoked concurrently is the +// callback's responsibility. +func (c *Client) WithResponseCallbacks(callbacks ...ResponseCallback) *Client { + c2 := *c + c2.modifyLock = sync.RWMutex{} + c2.responseCallbacks = callbacks + return &c2 +} + +// withConfiguredTimeout wraps the context with a timeout from the client configuration. +func (c *Client) withConfiguredTimeout(ctx context.Context) (context.Context, context.CancelFunc) { + timeout := c.ClientTimeout() + + if timeout > 0 { + return context.WithTimeout(ctx, timeout) + } + + return ctx, func() {} +} + +// RecordState returns a response callback that will record the state returned +// by Vault in a response header. +func RecordState(state *string) ResponseCallback { + return func(resp *Response) { + *state = resp.Header.Get(HeaderIndex) + } +} + +// RequireState returns a request callback that will add a request header to +// specify the state we require of Vault. This state was obtained from a +// response header seen previous, probably captured with RecordState. +func RequireState(states ...string) RequestCallback { + return func(req *Request) { + for _, s := range states { + req.Headers.Add(HeaderIndex, s) + } + } +} + +// compareReplicationStates returns 1 if s1 is newer or identical, -1 if s1 is older, and 0 +// if neither s1 or s2 is strictly greater. An error is returned if s1 or s2 +// are invalid or from different clusters. +func compareReplicationStates(s1, s2 string) (int, error) { + w1, err := ParseReplicationState(s1, nil) + if err != nil { + return 0, err + } + w2, err := ParseReplicationState(s2, nil) + if err != nil { + return 0, err + } + + if w1.ClusterID != w2.ClusterID { + return 0, fmt.Errorf("can't compare replication states with different ClusterIDs") + } + + switch { + case w1.LocalIndex >= w2.LocalIndex && w1.ReplicatedIndex >= w2.ReplicatedIndex: + return 1, nil + // We've already handled the case where both are equal above, so really we're + // asking here if one or both are lesser. + case w1.LocalIndex <= w2.LocalIndex && w1.ReplicatedIndex <= w2.ReplicatedIndex: + return -1, nil + } + + return 0, nil +} + +// MergeReplicationStates returns a merged array of replication states by iterating +// through all states in `old`. 
An iterated state is merged to the result before `new` +// based on the result of compareReplicationStates +func MergeReplicationStates(old []string, new string) []string { + if len(old) == 0 || len(old) > 2 { + return []string{new} + } + + var ret []string + for _, o := range old { + c, err := compareReplicationStates(o, new) + if err != nil { + return []string{new} + } + switch c { + case 1: + ret = append(ret, o) + case -1: + ret = append(ret, new) + case 0: + ret = append(ret, o, new) + } + } + return strutil.RemoveDuplicates(ret, false) +} + +func ParseReplicationState(raw string, hmacKey []byte) (*logical.WALState, error) { + cooked, err := base64.StdEncoding.DecodeString(raw) + if err != nil { + return nil, err + } + s := string(cooked) + + lastIndex := strings.LastIndexByte(s, ':') + if lastIndex == -1 { + return nil, fmt.Errorf("invalid full state header format") + } + state, stateHMACRaw := s[:lastIndex], s[lastIndex+1:] + stateHMAC, err := hex.DecodeString(stateHMACRaw) + if err != nil { + return nil, fmt.Errorf("invalid state header HMAC: %v, %w", stateHMACRaw, err) + } + + if len(hmacKey) != 0 { + hm := hmac.New(sha256.New, hmacKey) + hm.Write([]byte(state)) + if !hmac.Equal(hm.Sum(nil), stateHMAC) { + return nil, fmt.Errorf("invalid state header HMAC (mismatch)") + } + } + + pieces := strings.Split(state, ":") + if len(pieces) != 4 || pieces[0] != "v1" || pieces[1] == "" { + return nil, fmt.Errorf("invalid state header format") + } + localIndex, err := strconv.ParseUint(pieces[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid local index in state header: %w", err) + } + replicatedIndex, err := strconv.ParseUint(pieces[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid replicated index in state header: %w", err) + } + + return &logical.WALState{ + ClusterID: pieces[1], + LocalIndex: localIndex, + ReplicatedIndex: replicatedIndex, + }, nil +} + +// ForwardInconsistent returns a request callback that will add a request +// header which says: if the state required isn't present on the node receiving +// this request, forward it to the active node. This should be used in +// conjunction with RequireState. +func ForwardInconsistent() RequestCallback { + return func(req *Request) { + req.Headers.Set(HeaderInconsistent, "forward-active-node") + } +} + +// ForwardAlways returns a request callback which adds a header telling any +// performance standbys handling the request to forward it to the active node. +// This feature must be enabled in Vault's configuration. +func ForwardAlways() RequestCallback { + return func(req *Request) { + req.Headers.Set(HeaderForward, "active-node") + } +} + +// DefaultRetryPolicy is the default retry policy used by new Client objects. +// It is the same as retryablehttp.DefaultRetryPolicy except that it also retries +// 412 requests, which are returned by Vault when a X-Vault-Index header isn't +// satisfied. +func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { + retry, err := retryablehttp.DefaultRetryPolicy(ctx, resp, err) + if err != nil || retry { + return retry, err + } + if resp != nil && resp.StatusCode == 412 { + return true, nil + } + return false, nil +} + +// replicationStateStore is used to track cluster replication states +// in order to ensure proper read-after-write semantics for a Client. +type replicationStateStore struct { + m sync.RWMutex + store []string +} + +// recordState updates the store's replication states with the merger of all +// states. 
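+//
+// Concretely, each response's X-Vault-Index header (HeaderIndex) is merged
+// into the store via MergeReplicationStates, so that later requests can
+// replay the full set of observed states.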
+func (w *replicationStateStore) recordState(resp *Response) {
+	w.m.Lock()
+	defer w.m.Unlock()
+	newState := resp.Header.Get(HeaderIndex)
+	if newState != "" {
+		w.store = MergeReplicationStates(w.store, newState)
+	}
+}
+
+// requireState updates the Request with the store's current replication states.
+func (w *replicationStateStore) requireState(req *Request) {
+	w.m.RLock()
+	defer w.m.RUnlock()
+	for _, s := range w.store {
+		req.Headers.Add(HeaderIndex, s)
+	}
+}
+
+// states returns a copy of the states currently stored.
+func (w *replicationStateStore) states() []string {
+	w.m.RLock()
+	defer w.m.RUnlock()
+	c := make([]string, len(w.store))
+	copy(c, w.store)
+	return c
+}
+
+// validateToken will check for non-printable characters to prevent a call that will fail at the API
+func validateToken(t string) error {
+	idx := strings.IndexFunc(t, func(c rune) bool {
+		return !unicode.IsPrint(c)
+	})
+	if idx != -1 {
+		return fmt.Errorf("configured Vault token contains non-printable characters and cannot be used")
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/vault/api/help.go b/vendor/github.com/hashicorp/vault/api/help.go
new file mode 100644
index 00000000000..0988ebcd1fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/help.go
@@ -0,0 +1,37 @@
+package api
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+)
+
+// Help wraps HelpWithContext using context.Background.
+func (c *Client) Help(path string) (*Help, error) {
+	return c.HelpWithContext(context.Background(), path)
+}
+
+// HelpWithContext reads the help information for the given path.
+func (c *Client) HelpWithContext(ctx context.Context, path string) (*Help, error) {
+	ctx, cancelFunc := c.withConfiguredTimeout(ctx)
+	defer cancelFunc()
+
+	r := c.NewRequest(http.MethodGet, fmt.Sprintf("/v1/%s", path))
+	r.Params.Add("help", "1")
+
+	resp, err := c.rawRequestWithContext(ctx, r)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var result Help
+	err = resp.DecodeJSON(&result)
+	return &result, err
+}
+
+type Help struct {
+	Help    string                 `json:"help"`
+	SeeAlso []string               `json:"see_also"`
+	OpenAPI map[string]interface{} `json:"openapi"`
+}
diff --git a/vendor/github.com/hashicorp/vault/api/kv.go b/vendor/github.com/hashicorp/vault/api/kv.go
new file mode 100644
index 00000000000..37699df266f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/kv.go
@@ -0,0 +1,56 @@
+package api
+
+import "errors"
+
+// ErrSecretNotFound is returned by KVv1 and KVv2 wrappers to indicate that the
+// secret is missing at the given location.
+var ErrSecretNotFound = errors.New("secret not found")
+
+// A KVSecret is a key-value secret returned by Vault's KV secrets engine,
+// and is the most basic type of secret stored in Vault.
+//
+// Data contains the key-value pairs of the secret itself,
+// while VersionMetadata contains a subset of metadata describing
+// this particular version of the secret.
+// The VersionMetadata field for a KV v1 secret will always be nil, as
+// metadata is only supported starting in KV v2.
+//
+// The Raw field can be inspected for information about the lease,
+// and passed to a LifetimeWatcher object for periodic renewal.
+type KVSecret struct {
+	Data            map[string]interface{}
+	VersionMetadata *KVVersionMetadata
+	CustomMetadata  map[string]interface{}
+	Raw             *Secret
+}
+
+// KVv1 is used to return a client for reads and writes against
+// a KV v1 secrets engine in Vault.
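+//
+// A minimal usage sketch (illustrative only; assumes a configured Client
+// named client, a ctx, and a v1 engine mounted at "secret"):
+//
+//	kv := client.KVv1("secret")
+//	if err := kv.Put(ctx, "my-app", map[string]interface{}{"password": "Hashi123"}); err != nil {
+//		// handle error
+//	}
+//	s, err := kv.Get(ctx, "my-app")
+//	if err == nil {
+//		fmt.Println(s.Data["password"])
+//	}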
+// +// The mount path is the location where the target KV secrets engine resides +// in Vault. +// +// While v1 is not necessarily deprecated, Vault development servers tend to +// use v2 as the version of the KV secrets engine, as this is what's mounted +// by default when a server is started in -dev mode. See the kvv2 struct. +// +// Learn more about the KV secrets engine here: +// https://www.vaultproject.io/docs/secrets/kv +func (c *Client) KVv1(mountPath string) *KVv1 { + return &KVv1{c: c, mountPath: mountPath} +} + +// KVv2 is used to return a client for reads and writes against +// a KV v2 secrets engine in Vault. +// +// The mount path is the location where the target KV secrets engine resides +// in Vault. +// +// Vault development servers tend to have "secret" as the mount path, +// as these are the default settings when a server is started in -dev mode. +// +// Learn more about the KV secrets engine here: +// https://www.vaultproject.io/docs/secrets/kv +func (c *Client) KVv2(mountPath string) *KVv2 { + return &KVv2{c: c, mountPath: mountPath} +} diff --git a/vendor/github.com/hashicorp/vault/api/kv_v1.go b/vendor/github.com/hashicorp/vault/api/kv_v1.go new file mode 100644 index 00000000000..22ba992384b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/kv_v1.go @@ -0,0 +1,57 @@ +package api + +import ( + "context" + "fmt" +) + +type KVv1 struct { + c *Client + mountPath string +} + +// Get returns a secret from the KV v1 secrets engine. +func (kv *KVv1) Get(ctx context.Context, secretPath string) (*KVSecret, error) { + pathToRead := fmt.Sprintf("%s/%s", kv.mountPath, secretPath) + + secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead) + if err != nil { + return nil, fmt.Errorf("error encountered while reading secret at %s: %w", pathToRead, err) + } + if secret == nil { + return nil, fmt.Errorf("%w: at %s", ErrSecretNotFound, pathToRead) + } + + return &KVSecret{ + Data: secret.Data, + VersionMetadata: nil, + Raw: secret, + }, nil +} + +// Put inserts a key-value secret (e.g. {"password": "Hashi123"}) into the +// KV v1 secrets engine. +// +// If the secret already exists, it will be overwritten. +func (kv *KVv1) Put(ctx context.Context, secretPath string, data map[string]interface{}) error { + pathToWriteTo := fmt.Sprintf("%s/%s", kv.mountPath, secretPath) + + _, err := kv.c.Logical().WriteWithContext(ctx, pathToWriteTo, data) + if err != nil { + return fmt.Errorf("error writing secret to %s: %w", pathToWriteTo, err) + } + + return nil +} + +// Delete deletes a secret from the KV v1 secrets engine. +func (kv *KVv1) Delete(ctx context.Context, secretPath string) error { + pathToDelete := fmt.Sprintf("%s/%s", kv.mountPath, secretPath) + + _, err := kv.c.Logical().DeleteWithContext(ctx, pathToDelete) + if err != nil { + return fmt.Errorf("error deleting secret at %s: %w", pathToDelete, err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/api/kv_v2.go b/vendor/github.com/hashicorp/vault/api/kv_v2.go new file mode 100644 index 00000000000..335c21001be --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/kv_v2.go @@ -0,0 +1,778 @@ +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + "sort" + "strconv" + "time" + + "github.com/mitchellh/mapstructure" +) + +type KVv2 struct { + c *Client + mountPath string +} + +// KVMetadata is the full metadata for a given KV v2 secret. 
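+//
+// A hedged sketch of reading it (assumes a KVv2 handle named kv and a ctx;
+// the path is hypothetical):
+//
+//	md, err := kv.GetMetadata(ctx, "my-app")
+//	if err == nil {
+//		fmt.Println(md.CurrentVersion, len(md.Versions))
+//	}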
+type KVMetadata struct {
+	CASRequired        bool                   `mapstructure:"cas_required"`
+	CreatedTime        time.Time              `mapstructure:"created_time"`
+	CurrentVersion     int                    `mapstructure:"current_version"`
+	CustomMetadata     map[string]interface{} `mapstructure:"custom_metadata"`
+	DeleteVersionAfter time.Duration          `mapstructure:"delete_version_after"`
+	MaxVersions        int                    `mapstructure:"max_versions"`
+	OldestVersion      int                    `mapstructure:"oldest_version"`
+	UpdatedTime        time.Time              `mapstructure:"updated_time"`
+	// Keys are stringified ints, e.g. "3". To get a sorted slice of version metadata, use GetVersionsAsList.
+	Versions map[string]KVVersionMetadata `mapstructure:"versions"`
+	Raw      *Secret
+}
+
+// KVMetadataPutInput is the subset of metadata that can be replaced for a
+// KV v2 secret using the PutMetadata method.
+//
+// All fields should be explicitly provided, as any fields left unset in the
+// struct will be reset to their zero value.
+type KVMetadataPutInput struct {
+	CASRequired        bool
+	CustomMetadata     map[string]interface{}
+	DeleteVersionAfter time.Duration
+	MaxVersions        int
+}
+
+// KVMetadataPatchInput is the subset of metadata that can be manually modified for
+// a KV v2 secret using the PatchMetadata method.
+//
+// The struct's fields are all pointers. A pointer to a field's zero
+// value (e.g. false for *bool) implies that field should be reset to its
+// zero value after update, whereas a field left as a nil pointer
+// (e.g. nil for *bool) implies the field should remain unchanged.
+//
+// Since maps are already pointers, use an empty map to remove all
+// custom metadata.
+type KVMetadataPatchInput struct {
+	CASRequired        *bool
+	CustomMetadata     map[string]interface{}
+	DeleteVersionAfter *time.Duration
+	MaxVersions        *int
+}
+
+// KVVersionMetadata is a subset of metadata for a given version of a KV v2 secret.
+type KVVersionMetadata struct {
+	Version      int       `mapstructure:"version"`
+	CreatedTime  time.Time `mapstructure:"created_time"`
+	DeletionTime time.Time `mapstructure:"deletion_time"`
+	Destroyed    bool      `mapstructure:"destroyed"`
+}
+
+// Currently supported options: WithOption, WithCheckAndSet, WithMergeMethod
+type KVOption func() (key string, value interface{})
+
+const (
+	KVOptionCheckAndSet    = "cas"
+	KVOptionMethod         = "method"
+	KVMergeMethodPatch     = "patch"
+	KVMergeMethodReadWrite = "rw"
+)
+
+// WithOption can optionally be passed to provide generic options for a
+// KV request. Valid keys and values depend on the type of request.
+func WithOption(key string, value interface{}) KVOption {
+	return func() (string, interface{}) {
+		return key, value
+	}
+}
+
+// WithCheckAndSet can optionally be passed to perform a check-and-set
+// operation on a KV request. If not set, the write will be allowed.
+// If cas is set to 0, a write will only be allowed if the key doesn't exist.
+// If set to non-zero, the write will only be allowed if the key's current
+// version matches the version specified in the cas parameter.
+func WithCheckAndSet(cas int) KVOption {
+	return WithOption(KVOptionCheckAndSet, cas)
+}
+
+// WithMergeMethod can optionally be passed to dictate which type of
+// patch to perform in a Patch request. If set to "patch", then an HTTP PATCH
+// request will be issued. If set to "rw", then a read will be performed,
+// then a local update, followed by a remote update. Defaults to "patch".
+func WithMergeMethod(method string) KVOption {
+	return WithOption(KVOptionMethod, method)
+}
+
+// Get returns the latest version of a secret from the KV v2 secrets engine.
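+//
+// For example (illustrative; assumes a KVv2 handle named kv, a ctx, and a
+// hypothetical path):
+//
+//	secret, err := kv.Get(ctx, "my-app")
+//	if err != nil {
+//		// handle the error; errors.Is(err, ErrSecretNotFound) detects a missing secret
+//	}
+//	fmt.Println(secret.Data["password"])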
+//
+// If the latest version has been deleted, no error is returned, but
+// the Data field on the returned secret will be nil, and the VersionMetadata
+// field will contain the deletion time.
+func (kv *KVv2) Get(ctx context.Context, secretPath string) (*KVSecret, error) {
+	pathToRead := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath)
+
+	secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead)
+	if err != nil {
+		return nil, fmt.Errorf("error encountered while reading secret at %s: %w", pathToRead, err)
+	}
+	if secret == nil {
+		return nil, fmt.Errorf("%w: at %s", ErrSecretNotFound, pathToRead)
+	}
+
+	kvSecret, err := extractDataAndVersionMetadata(secret)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing secret at %s: %w", pathToRead, err)
+	}
+
+	kvSecret.CustomMetadata = extractCustomMetadata(secret)
+
+	return kvSecret, nil
+}
+
+// GetVersion returns the data and metadata for a specific version of the
+// given secret.
+//
+// If that version has been deleted, the Data field on the
+// returned secret will be nil, and the VersionMetadata field will contain the
+// deletion time.
+//
+// GetVersionsAsList can provide a list of available versions sorted by
+// version number, while the response from GetMetadata contains them as a map.
+func (kv *KVv2) GetVersion(ctx context.Context, secretPath string, version int) (*KVSecret, error) {
+	pathToRead := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath)
+
+	queryParams := map[string][]string{"version": {strconv.Itoa(version)}}
+	secret, err := kv.c.Logical().ReadWithDataWithContext(ctx, pathToRead, queryParams)
+	if err != nil {
+		return nil, err
+	}
+	if secret == nil {
+		return nil, fmt.Errorf("%w: for version %d at %s", ErrSecretNotFound, version, pathToRead)
+	}
+
+	kvSecret, err := extractDataAndVersionMetadata(secret)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing secret at %s: %w", pathToRead, err)
+	}
+
+	kvSecret.CustomMetadata = extractCustomMetadata(secret)
+
+	return kvSecret, nil
+}
+
+// GetVersionsAsList returns a subset of the metadata for each version of the secret, sorted by version number.
+func (kv *KVv2) GetVersionsAsList(ctx context.Context, secretPath string) ([]KVVersionMetadata, error) {
+	pathToRead := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath)
+
+	secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead)
+	if err != nil {
+		return nil, err
+	}
+	if secret == nil || secret.Data == nil {
+		return nil, fmt.Errorf("%w: no metadata at %s", ErrSecretNotFound, pathToRead)
+	}
+
+	md, err := extractFullMetadata(secret)
+	if err != nil {
+		return nil, fmt.Errorf("unable to extract metadata from secret to determine versions: %w", err)
+	}
+
+	versionsList := make([]KVVersionMetadata, 0, len(md.Versions))
+	for _, versionMetadata := range md.Versions {
+		versionsList = append(versionsList, versionMetadata)
+	}
+
+	sort.Slice(versionsList, func(i, j int) bool { return versionsList[i].Version < versionsList[j].Version })
+	return versionsList, nil
+}
+
+// GetMetadata returns the full metadata for a given secret, including a map of
+// its existing versions and their respective creation/deletion times, etc.
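+//
+// Where a sorted slice is more convenient than the Versions map, a sketch
+// using GetVersionsAsList (assumes kv and ctx):
+//
+//	versions, err := kv.GetVersionsAsList(ctx, "my-app")
+//	if err == nil && len(versions) > 0 {
+//		fmt.Println(versions[len(versions)-1].Version) // highest version number
+//	}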
+func (kv *KVv2) GetMetadata(ctx context.Context, secretPath string) (*KVMetadata, error) {
+	pathToRead := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath)
+
+	secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead)
+	if err != nil {
+		return nil, err
+	}
+	if secret == nil || secret.Data == nil {
+		return nil, fmt.Errorf("%w: no metadata at %s", ErrSecretNotFound, pathToRead)
+	}
+
+	md, err := extractFullMetadata(secret)
+	if err != nil {
+		return nil, fmt.Errorf("unable to extract metadata from secret: %w", err)
+	}
+
+	return md, nil
+}
+
+// Put inserts a key-value secret (e.g. {"password": "Hashi123"})
+// into the KV v2 secrets engine.
+//
+// If the secret already exists, a new version will be created
+// and the previous version can be accessed with the GetVersion method.
+// GetMetadata can provide a list of available versions.
+func (kv *KVv2) Put(ctx context.Context, secretPath string, data map[string]interface{}, opts ...KVOption) (*KVSecret, error) {
+	pathToWriteTo := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath)
+
+	wrappedData := map[string]interface{}{
+		"data": data,
+	}
+
+	// Add options such as check-and-set, etc.
+	// We leave this as an optional arg so that most users
+	// can just pass plain key-value secret data without
+	// having to remember to put the extra layer "data" in there.
+	options := make(map[string]interface{})
+	for _, opt := range opts {
+		k, v := opt()
+		options[k] = v
+	}
+	if len(opts) > 0 {
+		wrappedData["options"] = options
+	}
+
+	secret, err := kv.c.Logical().WriteWithContext(ctx, pathToWriteTo, wrappedData)
+	if err != nil {
+		return nil, fmt.Errorf("error writing secret to %s: %w", pathToWriteTo, err)
+	}
+	if secret == nil {
+		return nil, fmt.Errorf("%w: after writing to %s", ErrSecretNotFound, pathToWriteTo)
+	}
+
+	metadata, err := extractVersionMetadata(secret)
+	if err != nil {
+		return nil, fmt.Errorf("secret was written successfully, but unable to view version metadata from response: %w", err)
+	}
+
+	kvSecret := &KVSecret{
+		Data:            nil, // secret.Data in this case is the metadata
+		VersionMetadata: metadata,
+		Raw:             secret,
+	}
+
+	kvSecret.CustomMetadata = extractCustomMetadata(secret)
+
+	return kvSecret, nil
+}
+
+// PutMetadata can be used to fully replace a subset of metadata fields for a
+// given KV v2 secret. All fields will replace the corresponding values on the Vault server.
+// Any fields left unset will reset the corresponding field on the Vault server
+// back to its zero value.
+//
+// To only partially replace the values of these metadata fields, use PatchMetadata.
+//
+// This method can also be used to create a new secret with just metadata and no secret data yet.
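+//
+// A sketch (assumes kv and ctx; remember that every field is sent, so unset
+// fields are zeroed on the server):
+//
+//	err := kv.PutMetadata(ctx, "my-app", KVMetadataPutInput{
+//		MaxVersions: 5,
+//		CASRequired: true,
+//	})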
+func (kv *KVv2) PutMetadata(ctx context.Context, secretPath string, metadata KVMetadataPutInput) error {
+	pathToWriteTo := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath)
+
+	const (
+		casRequiredKey        = "cas_required"
+		deleteVersionAfterKey = "delete_version_after"
+		maxVersionsKey        = "max_versions"
+		customMetadataKey     = "custom_metadata"
+	)
+
+	// convert values to a map we can pass to Logical
+	metadataMap := make(map[string]interface{})
+	metadataMap[maxVersionsKey] = metadata.MaxVersions
+	metadataMap[deleteVersionAfterKey] = metadata.DeleteVersionAfter.String()
+	metadataMap[casRequiredKey] = metadata.CASRequired
+	metadataMap[customMetadataKey] = metadata.CustomMetadata
+
+	_, err := kv.c.Logical().WriteWithContext(ctx, pathToWriteTo, metadataMap)
+	if err != nil {
+		return fmt.Errorf("error writing secret metadata to %s: %w", pathToWriteTo, err)
+	}
+
+	return nil
+}
+
+// Patch additively updates the most recent version of a key-value secret,
+// differentiating it from Put which will fully overwrite the previous data.
+// Only the key-value pairs that are new or changing need to be provided.
+//
+// The WithMergeMethod KVOption function can optionally be passed to dictate which
+// kind of patch to perform, as older Vault server versions (pre-1.9.0) may
+// only be able to use the old "rw" (read-then-write) style of partial update,
+// whereas newer Vault servers can use the default value of "patch" if the
+// client token's policy has the "patch" capability.
+func (kv *KVv2) Patch(ctx context.Context, secretPath string, newData map[string]interface{}, opts ...KVOption) (*KVSecret, error) {
+	// determine patch method
+	var patchMethod string
+	var ok bool
+	for _, opt := range opts {
+		k, v := opt()
+		if k == "method" {
+			patchMethod, ok = v.(string)
+			if !ok {
+				return nil, fmt.Errorf("unsupported type provided for option value; value for patch method should be string \"rw\" or \"patch\"")
+			}
+		}
+	}
+
+	// Determine which kind of patch to use,
+	// the newer HTTP Patch style or the older read-then-write style
+	var kvs *KVSecret
+	var err error
+	switch patchMethod {
+	case "rw":
+		kvs, err = readThenWrite(ctx, kv.c, kv.mountPath, secretPath, newData)
+	case "patch":
+		kvs, err = mergePatch(ctx, kv.c, kv.mountPath, secretPath, newData, opts...)
+	case "":
+		kvs, err = mergePatch(ctx, kv.c, kv.mountPath, secretPath, newData, opts...)
+	default:
+		return nil, fmt.Errorf("unsupported patch method provided; value for patch method should be string \"rw\" or \"patch\"")
+	}
+	if err != nil {
+		return nil, fmt.Errorf("unable to perform patch: %w", err)
+	}
+	if kvs == nil {
+		return nil, fmt.Errorf("no secret was written to %s", secretPath)
+	}
+
+	return kvs, nil
+}
+
+// PatchMetadata can be used to replace just a subset of a secret's
+// metadata fields at a time, as opposed to PutMetadata which is used to
+// completely replace all fields on the previous metadata.
+func (kv *KVv2) PatchMetadata(ctx context.Context, secretPath string, metadata KVMetadataPatchInput) error {
+	pathToWriteTo := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath)
+
+	md, err := toMetadataMap(metadata)
+	if err != nil {
+		return fmt.Errorf("unable to create map for JSON merge patch request: %w", err)
+	}
+
+	_, err = kv.c.Logical().JSONMergePatch(ctx, pathToWriteTo, md)
+	if err != nil {
+		return fmt.Errorf("error patching metadata at %s: %w", pathToWriteTo, err)
+	}
+
+	return nil
+}
+
+// Delete deletes the most recent version of a secret from the KV v2
+// secrets engine.
To delete an older version, use DeleteVersions. +func (kv *KVv2) Delete(ctx context.Context, secretPath string) error { + pathToDelete := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath) + + _, err := kv.c.Logical().DeleteWithContext(ctx, pathToDelete) + if err != nil { + return fmt.Errorf("error deleting secret at %s: %w", pathToDelete, err) + } + + return nil +} + +// DeleteVersions deletes the specified versions of a secret from the KV v2 +// secrets engine. To delete the latest version of a secret, just use Delete. +func (kv *KVv2) DeleteVersions(ctx context.Context, secretPath string, versions []int) error { + // verb and path are different when trying to delete past versions + pathToDelete := fmt.Sprintf("%s/delete/%s", kv.mountPath, secretPath) + + if len(versions) == 0 { + return nil + } + + var versionsToDelete []string + for _, version := range versions { + versionsToDelete = append(versionsToDelete, strconv.Itoa(version)) + } + versionsMap := map[string]interface{}{ + "versions": versionsToDelete, + } + _, err := kv.c.Logical().WriteWithContext(ctx, pathToDelete, versionsMap) + if err != nil { + return fmt.Errorf("error deleting secret at %s: %w", pathToDelete, err) + } + + return nil +} + +// DeleteMetadata deletes all versions and metadata of the secret at the +// given path. +func (kv *KVv2) DeleteMetadata(ctx context.Context, secretPath string) error { + pathToDelete := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath) + + _, err := kv.c.Logical().DeleteWithContext(ctx, pathToDelete) + if err != nil { + return fmt.Errorf("error deleting secret metadata at %s: %w", pathToDelete, err) + } + + return nil +} + +// Undelete undeletes the given versions of a secret, restoring the data +// so that it can be fetched again with Get requests. +// +// A list of existing versions can be retrieved using the GetVersionsAsList method. +func (kv *KVv2) Undelete(ctx context.Context, secretPath string, versions []int) error { + pathToUndelete := fmt.Sprintf("%s/undelete/%s", kv.mountPath, secretPath) + + data := map[string]interface{}{ + "versions": versions, + } + + _, err := kv.c.Logical().WriteWithContext(ctx, pathToUndelete, data) + if err != nil { + return fmt.Errorf("error undeleting secret metadata at %s: %w", pathToUndelete, err) + } + + return nil +} + +// Destroy permanently removes the specified secret versions' data +// from the Vault server. If no secret exists at the given path, no +// action will be taken. +// +// A list of existing versions can be retrieved using the GetVersionsAsList method. +func (kv *KVv2) Destroy(ctx context.Context, secretPath string, versions []int) error { + pathToDestroy := fmt.Sprintf("%s/destroy/%s", kv.mountPath, secretPath) + + data := map[string]interface{}{ + "versions": versions, + } + + _, err := kv.c.Logical().WriteWithContext(ctx, pathToDestroy, data) + if err != nil { + return fmt.Errorf("error destroying secret metadata at %s: %w", pathToDestroy, err) + } + + return nil +} + +// Rollback can be used to roll a secret back to a previous +// non-deleted/non-destroyed version. That previous version becomes the +// next/newest version for the path. 
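+//
+// For example, restoring version 2 as the newest version (assumes kv, ctx,
+// and a hypothetical path):
+//
+//	restored, err := kv.Rollback(ctx, "my-app", 2)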
+func (kv *KVv2) Rollback(ctx context.Context, secretPath string, toVersion int) (*KVSecret, error) {
+	// First, do a read to get the current version for check-and-set
+	latest, err := kv.Get(ctx, secretPath)
+	if err != nil {
+		return nil, fmt.Errorf("unable to get latest version of secret: %w", err)
+	}
+
+	// Make sure a value already exists
+	if latest == nil {
+		return nil, fmt.Errorf("no secret was found at %s", secretPath)
+	}
+
+	// Verify metadata found
+	if latest.VersionMetadata == nil {
+		return nil, fmt.Errorf("no metadata found; rollback can only be used on existing data")
+	}
+
+	// Now run it again and read the version we want to roll back to
+	rollbackVersion, err := kv.GetVersion(ctx, secretPath, toVersion)
+	if err != nil {
+		return nil, fmt.Errorf("unable to get previous version %d of secret: %w", toVersion, err)
+	}
+
+	err = validateRollbackVersion(rollbackVersion)
+	if err != nil {
+		return nil, fmt.Errorf("invalid rollback version %d: %w", toVersion, err)
+	}
+
+	casVersion := latest.VersionMetadata.Version
+	kvs, err := kv.Put(ctx, secretPath, rollbackVersion.Data, WithCheckAndSet(casVersion))
+	if err != nil {
+		return nil, fmt.Errorf("unable to roll back to previous secret version: %w", err)
+	}
+
+	return kvs, nil
+}
+
+func extractCustomMetadata(secret *Secret) map[string]interface{} {
+	// Logical Writes return the metadata directly, Reads return it nested inside the "metadata" key
+	customMetadataInterface, ok := secret.Data["custom_metadata"]
+	if !ok {
+		metadataInterface := secret.Data["metadata"]
+		metadataMap, ok := metadataInterface.(map[string]interface{})
+		if !ok {
+			return nil
+		}
+		customMetadataInterface = metadataMap["custom_metadata"]
+	}
+
+	cm, ok := customMetadataInterface.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	return cm
+}
+
+func extractDataAndVersionMetadata(secret *Secret) (*KVSecret, error) {
+	// A nil map is a valid value for data: secret.Data will be nil when this
+	// version of the secret has been deleted, but the metadata is still
+	// available.
+ var data map[string]interface{} + if secret.Data != nil { + dataInterface, ok := secret.Data["data"] + if !ok { + return nil, fmt.Errorf("missing expected 'data' element") + } + + if dataInterface != nil { + data, ok = dataInterface.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("unexpected type for 'data' element: %T (%#v)", data, data) + } + } + } + + metadata, err := extractVersionMetadata(secret) + if err != nil { + return nil, fmt.Errorf("unable to get version metadata: %w", err) + } + + return &KVSecret{ + Data: data, + VersionMetadata: metadata, + Raw: secret, + }, nil +} + +func extractVersionMetadata(secret *Secret) (*KVVersionMetadata, error) { + var metadata *KVVersionMetadata + + if secret.Data == nil { + return nil, nil + } + + // Logical Writes return the metadata directly, Reads return it nested inside the "metadata" key + var metadataMap map[string]interface{} + metadataInterface, ok := secret.Data["metadata"] + if ok { + metadataMap, ok = metadataInterface.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("unexpected type for 'metadata' element: %T (%#v)", metadataInterface, metadataInterface) + } + } else { + metadataMap = secret.Data + } + + // deletion_time usually comes in as an empty string which can't be + // processed as time.RFC3339, so we reset it to a convertible value + if metadataMap["deletion_time"] == "" { + metadataMap["deletion_time"] = time.Time{} + } + + d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.StringToTimeHookFunc(time.RFC3339), + Result: &metadata, + }) + if err != nil { + return nil, fmt.Errorf("error setting up decoder for API response: %w", err) + } + + err = d.Decode(metadataMap) + if err != nil { + return nil, fmt.Errorf("error decoding metadata from API response into VersionMetadata: %w", err) + } + + return metadata, nil +} + +func extractFullMetadata(secret *Secret) (*KVMetadata, error) { + var metadata *KVMetadata + + if secret.Data == nil { + return nil, nil + } + + if versions, ok := secret.Data["versions"]; ok { + versionsMap := versions.(map[string]interface{}) + if len(versionsMap) > 0 { + for version, metadata := range versionsMap { + metadataMap := metadata.(map[string]interface{}) + // deletion_time usually comes in as an empty string which can't be + // processed as time.RFC3339, so we reset it to a convertible value + if metadataMap["deletion_time"] == "" { + metadataMap["deletion_time"] = time.Time{} + } + versionInt, err := strconv.Atoi(version) + if err != nil { + return nil, fmt.Errorf("error converting version %s to integer: %w", version, err) + } + metadataMap["version"] = versionInt + versionsMap[version] = metadataMap // save the updated copy of the metadata map + } + } + secret.Data["versions"] = versionsMap // save the updated copy of the versions map + } + + d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeHookFunc(time.RFC3339), + mapstructure.StringToTimeDurationHookFunc(), + ), + Result: &metadata, + }) + if err != nil { + return nil, fmt.Errorf("error setting up decoder for API response: %w", err) + } + + err = d.Decode(secret.Data) + if err != nil { + return nil, fmt.Errorf("error decoding metadata from API response into KVMetadata: %w", err) + } + + return metadata, nil +} + +func validateRollbackVersion(rollbackVersion *KVSecret) error { + // Make sure a value already exists + if rollbackVersion == nil || rollbackVersion.Data == nil { + return 
fmt.Errorf("no secret found") + } + + // Verify metadata found + if rollbackVersion.VersionMetadata == nil { + return fmt.Errorf("no version metadata found; rollback only works on existing data") + } + + // Verify it hasn't been deleted + if !rollbackVersion.VersionMetadata.DeletionTime.IsZero() { + return fmt.Errorf("cannot roll back to a version that has been deleted") + } + + if rollbackVersion.VersionMetadata.Destroyed { + return fmt.Errorf("cannot roll back to a version that has been destroyed") + } + + // Verify old data found + if rollbackVersion.Data == nil { + return fmt.Errorf("no data found; rollback only works on existing data") + } + + return nil +} + +func mergePatch(ctx context.Context, client *Client, mountPath string, secretPath string, newData map[string]interface{}, opts ...KVOption) (*KVSecret, error) { + pathToMergePatch := fmt.Sprintf("%s/data/%s", mountPath, secretPath) + + // take any other additional options provided + // and pass them along to the patch request + wrappedData := map[string]interface{}{ + "data": newData, + } + options := make(map[string]interface{}) + for _, opt := range opts { + k, v := opt() + options[k] = v + } + if len(opts) > 0 { + wrappedData["options"] = options + } + + secret, err := client.Logical().JSONMergePatch(ctx, pathToMergePatch, wrappedData) + if err != nil { + var re *ResponseError + + if errors.As(err, &re) { + switch re.StatusCode { + // 403 + case http.StatusForbidden: + return nil, fmt.Errorf("received 403 from Vault server; please ensure that token's policy has \"patch\" capability: %w", err) + + // 404 + case http.StatusNotFound: + return nil, fmt.Errorf("%w: performing merge patch to %s", ErrSecretNotFound, pathToMergePatch) + + // 405 + case http.StatusMethodNotAllowed: + // If it's a 405, that probably means the server is running a pre-1.9 + // Vault version that doesn't support the HTTP PATCH method. + // Fall back to the old way of doing it. + return readThenWrite(ctx, client, mountPath, secretPath, newData) + } + } + + return nil, fmt.Errorf("error performing merge patch to %s: %w", pathToMergePatch, err) + } + + metadata, err := extractVersionMetadata(secret) + if err != nil { + return nil, fmt.Errorf("secret was written successfully, but unable to view version metadata from response: %w", err) + } + + kvSecret := &KVSecret{ + Data: nil, // secret.Data in this case is the metadata + VersionMetadata: metadata, + Raw: secret, + } + + kvSecret.CustomMetadata = extractCustomMetadata(secret) + + return kvSecret, nil +} + +func readThenWrite(ctx context.Context, client *Client, mountPath string, secretPath string, newData map[string]interface{}) (*KVSecret, error) { + // First, read the secret. 
+ existingVersion, err := client.KVv2(mountPath).Get(ctx, secretPath) + if err != nil { + return nil, fmt.Errorf("error reading secret as part of read-then-write patch operation: %w", err) + } + + // Make sure the secret already exists + if existingVersion == nil || existingVersion.Data == nil { + return nil, fmt.Errorf("%w: at %s as part of read-then-write patch operation", ErrSecretNotFound, secretPath) + } + + // Verify existing secret has metadata + if existingVersion.VersionMetadata == nil { + return nil, fmt.Errorf("no metadata found at %s; patch can only be used on existing data", secretPath) + } + + // Copy new data over with existing data + combinedData := existingVersion.Data + for k, v := range newData { + combinedData[k] = v + } + + updatedSecret, err := client.KVv2(mountPath).Put(ctx, secretPath, combinedData, WithCheckAndSet(existingVersion.VersionMetadata.Version)) + if err != nil { + return nil, fmt.Errorf("error writing secret to %s: %w", secretPath, err) + } + + return updatedSecret, nil +} + +func toMetadataMap(patchInput KVMetadataPatchInput) (map[string]interface{}, error) { + metadataMap := make(map[string]interface{}) + + const ( + casRequiredKey = "cas_required" + deleteVersionAfterKey = "delete_version_after" + maxVersionsKey = "max_versions" + customMetadataKey = "custom_metadata" + ) + + // The KVMetadataPatchInput struct is designed to have pointer fields so that + // the user can easily express the difference between explicitly setting a + // field back to its zero value (e.g. false), as opposed to just having + // the field remain unchanged (e.g. nil). This way, they only need to pass + // the fields they want to change. + if patchInput.MaxVersions != nil { + metadataMap[maxVersionsKey] = *(patchInput.MaxVersions) + } + if patchInput.CASRequired != nil { + metadataMap[casRequiredKey] = *(patchInput.CASRequired) + } + if patchInput.CustomMetadata != nil { + if len(patchInput.CustomMetadata) == 0 { // empty non-nil map means delete all the keys + metadataMap[customMetadataKey] = nil + } else { + metadataMap[customMetadataKey] = patchInput.CustomMetadata + } + } + if patchInput.DeleteVersionAfter != nil { + metadataMap[deleteVersionAfterKey] = patchInput.DeleteVersionAfter.String() + } + + return metadataMap, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go b/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go new file mode 100644 index 00000000000..5f3eadbffdd --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go @@ -0,0 +1,403 @@ +package api + +import ( + "errors" + "math/rand" + "sync" + "time" + + "github.com/cenkalti/backoff/v3" +) + +var ( + ErrLifetimeWatcherMissingInput = errors.New("missing input") + ErrLifetimeWatcherMissingSecret = errors.New("missing secret") + ErrLifetimeWatcherNotRenewable = errors.New("secret is not renewable") + ErrLifetimeWatcherNoSecretData = errors.New("returned empty secret data") + + // Deprecated; kept for compatibility + ErrRenewerMissingInput = errors.New("missing input to renewer") + ErrRenewerMissingSecret = errors.New("missing secret to renew") + ErrRenewerNotRenewable = errors.New("secret is not renewable") + ErrRenewerNoSecretData = errors.New("returned empty secret data") + + // DefaultLifetimeWatcherRenewBuffer is the default size of the buffer for renew + // messages on the channel. 
+ DefaultLifetimeWatcherRenewBuffer = 5 + // Deprecated: kept for backwards compatibility + DefaultRenewerRenewBuffer = 5 +) + +type RenewBehavior uint + +const ( + // RenewBehaviorIgnoreErrors means we will attempt to keep renewing until + // we hit the lifetime threshold. It also ignores errors stemming from + // passing a non-renewable lease in. In practice, this means you simply + // reauthenticate/refetch credentials when the watcher exits. This is the + // default. + RenewBehaviorIgnoreErrors RenewBehavior = iota + + // RenewBehaviorRenewDisabled turns off renewal attempts entirely. This + // allows you to simply watch lifetime and have the watcher return at a + // reasonable threshold without actually making Vault calls. + RenewBehaviorRenewDisabled + + // RenewBehaviorErrorOnErrors is the "legacy" behavior which always exits + // on some kind of error + RenewBehaviorErrorOnErrors +) + +// LifetimeWatcher is a process for watching lifetime of a secret. +// +// watcher, err := client.NewLifetimeWatcher(&LifetimeWatcherInput{ +// Secret: mySecret, +// }) +// go watcher.Start() +// defer watcher.Stop() +// +// for { +// select { +// case err := <-watcher.DoneCh(): +// if err != nil { +// log.Fatal(err) +// } +// +// // Renewal is now over +// case renewal := <-watcher.RenewCh(): +// log.Printf("Successfully renewed: %#v", renewal) +// } +// } +// +// `DoneCh` will return if renewal fails, or if the remaining lease duration is +// under a built-in threshold and either renewing is not extending it or +// renewing is disabled. In both cases, the caller should attempt a re-read of +// the secret. Clients should check the return value of the channel to see if +// renewal was successful. +type LifetimeWatcher struct { + l sync.Mutex + + client *Client + secret *Secret + grace time.Duration + random *rand.Rand + increment int + doneCh chan error + renewCh chan *RenewOutput + renewBehavior RenewBehavior + + stopped bool + stopCh chan struct{} + + errLifetimeWatcherNotRenewable error + errLifetimeWatcherNoSecretData error +} + +// LifetimeWatcherInput is used as input to the renew function. +type LifetimeWatcherInput struct { + // Secret is the secret to renew + Secret *Secret + + // DEPRECATED: this does not do anything. + Grace time.Duration + + // Rand is the randomizer to use for underlying randomization. If not + // provided, one will be generated and seeded automatically. If provided, it + // is assumed to have already been seeded. + Rand *rand.Rand + + // RenewBuffer is the size of the buffered channel where renew messages are + // dispatched. + RenewBuffer int + + // The new TTL, in seconds, that should be set on the lease. The TTL set + // here may or may not be honored by the vault server, based on Vault + // configuration or any associated max TTL values. If specified, the + // minimum of this value and the remaining lease duration will be used + // for grace period calculations. + Increment int + + // RenewBehavior controls what happens when a renewal errors or the + // passed-in secret is not renewable. + RenewBehavior RenewBehavior +} + +// RenewOutput is the metadata returned to the client (if it's listening) to +// renew messages. +type RenewOutput struct { + // RenewedAt is the timestamp when the renewal took place (UTC). + RenewedAt time.Time + + // Secret is the underlying renewal data. It's the same struct as all data + // that is returned from Vault, but since this is renewal data, it will not + // usually include the secret itself. 
+ Secret *Secret +} + +// NewLifetimeWatcher creates a new renewer from the given input. +func (c *Client) NewLifetimeWatcher(i *LifetimeWatcherInput) (*LifetimeWatcher, error) { + if i == nil { + return nil, ErrLifetimeWatcherMissingInput + } + + secret := i.Secret + if secret == nil { + return nil, ErrLifetimeWatcherMissingSecret + } + + random := i.Rand + if random == nil { + random = rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) + } + + renewBuffer := i.RenewBuffer + if renewBuffer == 0 { + renewBuffer = DefaultLifetimeWatcherRenewBuffer + } + + return &LifetimeWatcher{ + client: c, + secret: secret, + increment: i.Increment, + random: random, + doneCh: make(chan error, 1), + renewCh: make(chan *RenewOutput, renewBuffer), + renewBehavior: i.RenewBehavior, + + stopped: false, + stopCh: make(chan struct{}), + + errLifetimeWatcherNotRenewable: ErrLifetimeWatcherNotRenewable, + errLifetimeWatcherNoSecretData: ErrLifetimeWatcherNoSecretData, + }, nil +} + +// Deprecated: exists only for backwards compatibility. Calls +// NewLifetimeWatcher, and sets compatibility flags. +func (c *Client) NewRenewer(i *LifetimeWatcherInput) (*LifetimeWatcher, error) { + if i == nil { + return nil, ErrRenewerMissingInput + } + + secret := i.Secret + if secret == nil { + return nil, ErrRenewerMissingSecret + } + + renewer, err := c.NewLifetimeWatcher(i) + if err != nil { + return nil, err + } + + renewer.renewBehavior = RenewBehaviorErrorOnErrors + renewer.errLifetimeWatcherNotRenewable = ErrRenewerNotRenewable + renewer.errLifetimeWatcherNoSecretData = ErrRenewerNoSecretData + return renewer, err +} + +// DoneCh returns the channel where the renewer will publish when renewal stops. +// If there is an error, this will be an error. +func (r *LifetimeWatcher) DoneCh() <-chan error { + return r.doneCh +} + +// RenewCh is a channel that receives a message when a successful renewal takes +// place and includes metadata about the renewal. +func (r *LifetimeWatcher) RenewCh() <-chan *RenewOutput { + return r.renewCh +} + +// Stop stops the renewer. +func (r *LifetimeWatcher) Stop() { + r.l.Lock() + defer r.l.Unlock() + + if !r.stopped { + close(r.stopCh) + r.stopped = true + } +} + +// Start starts a background process for watching the lifetime of this secret. +// If renewal is enabled, when the secret has auth data, this attempts to renew +// the auth (token); When the secret has a lease, this attempts to renew the +// lease. +func (r *LifetimeWatcher) Start() { + r.doneCh <- r.doRenew() +} + +// Renew is for compatibility with the legacy api.Renewer. Calling Renew +// simply chains to Start. +func (r *LifetimeWatcher) Renew() { + r.Start() +} + +type renewFunc func(string, int) (*Secret, error) + +// doRenew is a helper for renewing authentication. 
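+// It dispatches on the secret's shape: token renewal when auth data is
+// present, lease renewal otherwise.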
+func (r *LifetimeWatcher) doRenew() error { + defaultInitialRetryInterval := 10 * time.Second + switch { + case r.secret.Auth != nil: + return r.doRenewWithOptions(true, !r.secret.Auth.Renewable, + r.secret.Auth.LeaseDuration, r.secret.Auth.ClientToken, + r.client.Auth().Token().RenewTokenAsSelf, defaultInitialRetryInterval) + default: + return r.doRenewWithOptions(false, !r.secret.Renewable, + r.secret.LeaseDuration, r.secret.LeaseID, + r.client.Sys().Renew, defaultInitialRetryInterval) + } +} + +func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool, initLeaseDuration int, credString string, + renew renewFunc, initialRetryInterval time.Duration, +) error { + if credString == "" || + (nonRenewable && r.renewBehavior == RenewBehaviorErrorOnErrors) { + return r.errLifetimeWatcherNotRenewable + } + + initialTime := time.Now() + priorDuration := time.Duration(initLeaseDuration) * time.Second + r.calculateGrace(priorDuration, time.Duration(r.increment)*time.Second) + var errorBackoff backoff.BackOff + + for { + // Check if we are stopped. + select { + case <-r.stopCh: + return nil + default: + } + + var remainingLeaseDuration time.Duration + fallbackLeaseDuration := initialTime.Add(priorDuration).Sub(time.Now()) + var renewal *Secret + var err error + + switch { + case nonRenewable || r.renewBehavior == RenewBehaviorRenewDisabled: + // Can't or won't renew, just keep the same expiration so we exit + // when it's reauthentication time + remainingLeaseDuration = fallbackLeaseDuration + + default: + // Renew the token + renewal, err = renew(credString, r.increment) + if err != nil || renewal == nil || (tokenMode && renewal.Auth == nil) { + if r.renewBehavior == RenewBehaviorErrorOnErrors { + if err != nil { + return err + } + if renewal == nil || (tokenMode && renewal.Auth == nil) { + return r.errLifetimeWatcherNoSecretData + } + } + + // Calculate remaining duration until initial token lease expires + remainingLeaseDuration = initialTime.Add(time.Duration(initLeaseDuration) * time.Second).Sub(time.Now()) + if errorBackoff == nil { + errorBackoff = &backoff.ExponentialBackOff{ + MaxElapsedTime: remainingLeaseDuration, + RandomizationFactor: backoff.DefaultRandomizationFactor, + InitialInterval: initialRetryInterval, + MaxInterval: 5 * time.Minute, + Multiplier: 2, + Clock: backoff.SystemClock, + } + errorBackoff.Reset() + } + break + } + errorBackoff = nil + + // Push a message that a renewal took place. + select { + case r.renewCh <- &RenewOutput{time.Now().UTC(), renewal}: + default: + } + + // Possibly error if we are not renewable + if ((tokenMode && !renewal.Auth.Renewable) || (!tokenMode && !renewal.Renewable)) && + r.renewBehavior == RenewBehaviorErrorOnErrors { + return r.errLifetimeWatcherNotRenewable + } + + // Reset initial time + initialTime = time.Now() + + // Grab the lease duration + initLeaseDuration = renewal.LeaseDuration + if tokenMode { + initLeaseDuration = renewal.Auth.LeaseDuration + } + + remainingLeaseDuration = time.Duration(initLeaseDuration) * time.Second + } + + var sleepDuration time.Duration + + if errorBackoff != nil { + sleepDuration = errorBackoff.NextBackOff() + if sleepDuration == backoff.Stop { + return err + } + } else { + // We keep evaluating a new grace period so long as the lease is + // extending. Once it stops extending, we've hit the max and need to + // rely on the grace duration. 
+			if remainingLeaseDuration > priorDuration {
+				r.calculateGrace(remainingLeaseDuration, time.Duration(r.increment)*time.Second)
+			}
+			priorDuration = remainingLeaseDuration
+
+			// The sleep duration is set to 2/3 of the current lease duration plus
+			// 1/3 of the current grace period, which adds jitter.
+			sleepDuration = time.Duration(float64(remainingLeaseDuration.Nanoseconds())*2/3 + float64(r.grace.Nanoseconds())/3)
+		}
+
+		// If we are within grace, return now; or, if the amount of time we
+		// would sleep would land us in the grace period. This helps with short
+		// tokens; for example, you don't want a current lease duration of 4
+		// seconds, a grace period of 3 seconds, and end up sleeping for more
+		// than three of those seconds and having a very small budget of time
+		// to renew.
+		if remainingLeaseDuration <= r.grace || remainingLeaseDuration-sleepDuration <= r.grace {
+			return nil
+		}
+
+		select {
+		case <-r.stopCh:
+			return nil
+		case <-time.After(sleepDuration):
+			continue
+		}
+	}
+}
+
+// calculateGrace calculates the grace period based on the minimum of the
+// remaining lease duration and the token increment value; it also adds some
+// jitter to not have clients be in sync.
+func (r *LifetimeWatcher) calculateGrace(leaseDuration, increment time.Duration) {
+	minDuration := leaseDuration
+	if minDuration > increment && increment > 0 {
+		minDuration = increment
+	}
+
+	if minDuration <= 0 {
+		r.grace = 0
+		return
+	}
+
+	leaseNanos := float64(minDuration.Nanoseconds())
+	jitterMax := 0.1 * leaseNanos
+
+	// For a given lease duration, we want to allow 80-90% of that to elapse,
+	// so the remaining amount is the grace period
+	r.grace = time.Duration(jitterMax) + time.Duration(uint64(r.random.Int63())%uint64(jitterMax))
+}
+
+type (
+	Renewer      = LifetimeWatcher
+	RenewerInput = LifetimeWatcherInput
+)
diff --git a/vendor/github.com/hashicorp/vault/api/logical.go b/vendor/github.com/hashicorp/vault/api/logical.go
new file mode 100644
index 00000000000..d2e5bb5e5e1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/logical.go
@@ -0,0 +1,366 @@
+package api
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"strings"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/vault/sdk/helper/jsonutil"
+)
+
+const (
+	wrappedResponseLocation = "cubbyhole/response"
+)
+
+var (
+	// The default TTL that will be used with `sys/wrapping/wrap`, can be
+	// changed
+	DefaultWrappingTTL = "5m"
+
+	// The default function used if no other function is set. It honors the env
+	// var to set the wrap TTL. The default wrap TTL will apply when writing
+	// to `sys/wrapping/wrap` when the env var is not set.
+	DefaultWrappingLookupFunc = func(operation, path string) string {
+		if os.Getenv(EnvVaultWrapTTL) != "" {
+			return os.Getenv(EnvVaultWrapTTL)
+		}
+
+		if (operation == http.MethodPut || operation == http.MethodPost) && path == "sys/wrapping/wrap" {
+			return DefaultWrappingTTL
+		}
+
+		return ""
+	}
+)
+
+// Logical is used to perform logical backend operations on Vault.
+type Logical struct {
+	c *Client
+}
+
+// Logical is used to return the client for logical-backend API calls.
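+//
+// A minimal sketch (assumes a configured Client named client; the path is
+// hypothetical):
+//
+//	secret, err := client.Logical().Read("secret/data/my-app")
+//	if err == nil && secret != nil {
+//		fmt.Println(secret.Data)
+//	}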
+func (c *Client) Logical() *Logical { + return &Logical{c: c} +} + +func (c *Logical) Read(path string) (*Secret, error) { + return c.ReadWithDataWithContext(context.Background(), path, nil) +} + +func (c *Logical) ReadWithContext(ctx context.Context, path string) (*Secret, error) { + return c.ReadWithDataWithContext(ctx, path, nil) +} + +func (c *Logical) ReadWithData(path string, data map[string][]string) (*Secret, error) { + return c.ReadWithDataWithContext(context.Background(), path, data) +} + +func (c *Logical) ReadWithDataWithContext(ctx context.Context, path string, data map[string][]string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + resp, err := c.readRawWithDataWithContext(ctx, path, data) + if resp != nil { + defer resp.Body.Close() + } + if resp != nil && resp.StatusCode == 404 { + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, parseErr + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, nil + } + return nil, nil + } + if err != nil { + return nil, err + } + + return ParseSecret(resp.Body) +} + +func (c *Logical) ReadRaw(path string) (*Response, error) { + return c.ReadRawWithData(path, nil) +} + +func (c *Logical) ReadRawWithData(path string, data map[string][]string) (*Response, error) { + return c.ReadRawWithDataWithContext(context.Background(), path, data) +} + +func (c *Logical) ReadRawWithDataWithContext(ctx context.Context, path string, data map[string][]string) (*Response, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + return c.readRawWithDataWithContext(ctx, path, data) +} + +func (c *Logical) readRawWithDataWithContext(ctx context.Context, path string, data map[string][]string) (*Response, error) { + r := c.c.NewRequest(http.MethodGet, "/v1/"+path) + + var values url.Values + for k, v := range data { + if values == nil { + values = make(url.Values) + } + for _, val := range v { + values.Add(k, val) + } + } + + if values != nil { + r.Params = values + } + + return c.c.RawRequestWithContext(ctx, r) +} + +func (c *Logical) List(path string) (*Secret, error) { + return c.ListWithContext(context.Background(), path) +} + +func (c *Logical) ListWithContext(ctx context.Context, path string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest("LIST", "/v1/"+path) + // Set this for broader compatibility, but we use LIST above to be able to + // handle the wrapping lookup function + r.Method = http.MethodGet + r.Params.Set("list", "true") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + } + if resp != nil && resp.StatusCode == 404 { + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, parseErr + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, nil + } + return nil, nil + } + if err != nil { + return nil, err + } + + return ParseSecret(resp.Body) +} + +func (c *Logical) Write(path string, data map[string]interface{}) (*Secret, error) { + return c.WriteWithContext(context.Background(), path, data) +} + +func (c *Logical) WriteWithContext(ctx context.Context, path string, data map[string]interface{}) (*Secret, error) { + r := c.c.NewRequest(http.MethodPut, "/v1/"+path) + if err := r.SetJSONBody(data); err != nil { + return nil, 
err + } + + return c.write(ctx, path, r) +} + +func (c *Logical) JSONMergePatch(ctx context.Context, path string, data map[string]interface{}) (*Secret, error) { + r := c.c.NewRequest(http.MethodPatch, "/v1/"+path) + r.Headers.Set("Content-Type", "application/merge-patch+json") + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + return c.write(ctx, path, r) +} + +func (c *Logical) WriteBytes(path string, data []byte) (*Secret, error) { + return c.WriteBytesWithContext(context.Background(), path, data) +} + +func (c *Logical) WriteBytesWithContext(ctx context.Context, path string, data []byte) (*Secret, error) { + r := c.c.NewRequest(http.MethodPut, "/v1/"+path) + r.BodyBytes = data + + return c.write(ctx, path, r) +} + +func (c *Logical) write(ctx context.Context, path string, request *Request) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + resp, err := c.c.rawRequestWithContext(ctx, request) + if resp != nil { + defer resp.Body.Close() + } + if resp != nil && resp.StatusCode == 404 { + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, parseErr + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, err + } + } + if err != nil { + return nil, err + } + + return ParseSecret(resp.Body) +} + +func (c *Logical) Delete(path string) (*Secret, error) { + return c.DeleteWithContext(context.Background(), path) +} + +func (c *Logical) DeleteWithContext(ctx context.Context, path string) (*Secret, error) { + return c.DeleteWithDataWithContext(ctx, path, nil) +} + +func (c *Logical) DeleteWithData(path string, data map[string][]string) (*Secret, error) { + return c.DeleteWithDataWithContext(context.Background(), path, data) +} + +func (c *Logical) DeleteWithDataWithContext(ctx context.Context, path string, data map[string][]string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/"+path) + + var values url.Values + for k, v := range data { + if values == nil { + values = make(url.Values) + } + for _, val := range v { + values.Add(k, val) + } + } + + if values != nil { + r.Params = values + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + } + if resp != nil && resp.StatusCode == 404 { + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, parseErr + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, err + } + } + if err != nil { + return nil, err + } + + return ParseSecret(resp.Body) +} + +func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) { + return c.UnwrapWithContext(context.Background(), wrappingToken) +} + +func (c *Logical) UnwrapWithContext(ctx context.Context, wrappingToken string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + var data map[string]interface{} + wt := strings.TrimSpace(wrappingToken) + if wrappingToken != "" { + if c.c.Token() == "" { + c.c.SetToken(wt) + } else if wrappingToken != c.c.Token() { + data = map[string]interface{}{ + "token": wt, + } + } + } + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/wrapping/unwrap") + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil { + defer 
resp.Body.Close() + } + if resp == nil || resp.StatusCode != 404 { + if err != nil { + return nil, err + } + if resp == nil { + return nil, nil + } + return ParseSecret(resp.Body) + } + + // In the 404 case this may actually be a wrapped 404 error + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, parseErr + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, nil + } + + // Otherwise this might be an old-style wrapping token so attempt the old + // method + if wrappingToken != "" { + origToken := c.c.Token() + defer c.c.SetToken(origToken) + c.c.SetToken(wrappingToken) + } + + secret, err = c.ReadWithContext(ctx, wrappedResponseLocation) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("error reading %q: {{err}}", wrappedResponseLocation), err) + } + if secret == nil { + return nil, fmt.Errorf("no value found at %q", wrappedResponseLocation) + } + if secret.Data == nil { + return nil, fmt.Errorf("\"data\" not found in wrapping response") + } + if _, ok := secret.Data["response"]; !ok { + return nil, fmt.Errorf("\"response\" not found in wrapping response \"data\" map") + } + + wrappedSecret := new(Secret) + buf := bytes.NewBufferString(secret.Data["response"].(string)) + if err := jsonutil.DecodeJSONFromReader(buf, wrappedSecret); err != nil { + return nil, errwrap.Wrapf("error unmarshalling wrapped secret: {{err}}", err) + } + + return wrappedSecret, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/output_policy.go b/vendor/github.com/hashicorp/vault/api/output_policy.go new file mode 100644 index 00000000000..85d1617e5e9 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/output_policy.go @@ -0,0 +1,82 @@ +package api + +import ( + "fmt" + "net/http" + "net/url" + "strings" +) + +const ( + ErrOutputPolicyRequest = "output a policy, please" +) + +var LastOutputPolicyError *OutputPolicyError + +type OutputPolicyError struct { + method string + path string + finalHCLString string +} + +func (d *OutputPolicyError) Error() string { + if d.finalHCLString == "" { + p, err := d.buildSamplePolicy() + if err != nil { + return err.Error() + } + d.finalHCLString = p + } + + return ErrOutputPolicyRequest +} + +func (d *OutputPolicyError) HCLString() (string, error) { + if d.finalHCLString == "" { + p, err := d.buildSamplePolicy() + if err != nil { + return "", err + } + d.finalHCLString = p + } + return d.finalHCLString, nil +} + +// Builds a sample policy document from the request +func (d *OutputPolicyError) buildSamplePolicy() (string, error) { + var capabilities []string + switch d.method { + case http.MethodGet, "": + capabilities = append(capabilities, "read") + case http.MethodPost, http.MethodPut: + capabilities = append(capabilities, "create") + capabilities = append(capabilities, "update") + case http.MethodPatch: + capabilities = append(capabilities, "patch") + case http.MethodDelete: + capabilities = append(capabilities, "delete") + case "LIST": + capabilities = append(capabilities, "list") + } + + // sanitize, then trim the Vault address and v1 from the front of the path + path, err := url.PathUnescape(d.path) + if err != nil { + return "", fmt.Errorf("failed to unescape request URL characters: %v", err) + } + + // determine whether to add sudo capability + if IsSudoPath(path) { + capabilities = append(capabilities, "sudo") + } + + // the OpenAPI response has a / in front of each path, + // but policies need the path 
without that leading slash + path = strings.TrimLeft(path, "/") + + capStr := strings.Join(capabilities, `", "`) + return fmt.Sprintf( + `path "%s" { + capabilities = ["%s"] +}`, path, capStr), nil +} diff --git a/vendor/github.com/hashicorp/vault/api/output_string.go b/vendor/github.com/hashicorp/vault/api/output_string.go new file mode 100644 index 00000000000..80c591f20b5 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/output_string.go @@ -0,0 +1,95 @@ +package api + +import ( + "fmt" + "net/http" + "strings" + + retryablehttp "github.com/hashicorp/go-retryablehttp" +) + +const ( + ErrOutputStringRequest = "output a string, please" +) + +var LastOutputStringError *OutputStringError + +type OutputStringError struct { + *retryablehttp.Request + TLSSkipVerify bool + ClientCACert, ClientCAPath string + ClientCert, ClientKey string + finalCurlString string +} + +func (d *OutputStringError) Error() string { + if d.finalCurlString == "" { + cs, err := d.buildCurlString() + if err != nil { + return err.Error() + } + d.finalCurlString = cs + } + + return ErrOutputStringRequest +} + +func (d *OutputStringError) CurlString() (string, error) { + if d.finalCurlString == "" { + cs, err := d.buildCurlString() + if err != nil { + return "", err + } + d.finalCurlString = cs + } + return d.finalCurlString, nil +} + +func (d *OutputStringError) buildCurlString() (string, error) { + body, err := d.Request.BodyBytes() + if err != nil { + return "", err + } + + // Build cURL string + finalCurlString := "curl " + if d.TLSSkipVerify { + finalCurlString += "--insecure " + } + if d.Request.Method != http.MethodGet { + finalCurlString = fmt.Sprintf("%s-X %s ", finalCurlString, d.Request.Method) + } + if d.ClientCACert != "" { + clientCACert := strings.ReplaceAll(d.ClientCACert, "'", "'\"'\"'") + finalCurlString = fmt.Sprintf("%s--cacert '%s' ", finalCurlString, clientCACert) + } + if d.ClientCAPath != "" { + clientCAPath := strings.ReplaceAll(d.ClientCAPath, "'", "'\"'\"'") + finalCurlString = fmt.Sprintf("%s--capath '%s' ", finalCurlString, clientCAPath) + } + if d.ClientCert != "" { + clientCert := strings.ReplaceAll(d.ClientCert, "'", "'\"'\"'") + finalCurlString = fmt.Sprintf("%s--cert '%s' ", finalCurlString, clientCert) + } + if d.ClientKey != "" { + clientKey := strings.ReplaceAll(d.ClientKey, "'", "'\"'\"'") + finalCurlString = fmt.Sprintf("%s--key '%s' ", finalCurlString, clientKey) + } + for k, v := range d.Request.Header { + for _, h := range v { + if strings.ToLower(k) == "x-vault-token" { + h = `$(vault print token)` + } + finalCurlString = fmt.Sprintf("%s-H \"%s: %s\" ", finalCurlString, k, h) + } + } + + if len(body) > 0 { + // We need to escape single quotes since that's what we're using to + // quote the body + escapedBody := strings.ReplaceAll(string(body), "'", "'\"'\"'") + finalCurlString = fmt.Sprintf("%s-d '%s' ", finalCurlString, escapedBody) + } + + return fmt.Sprintf("%s%s", finalCurlString, d.Request.URL.String()), nil +} diff --git a/vendor/github.com/hashicorp/vault/api/plugin_helpers.go b/vendor/github.com/hashicorp/vault/api/plugin_helpers.go new file mode 100644 index 00000000000..2b1b35c3b59 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/plugin_helpers.go @@ -0,0 +1,267 @@ +package api + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "flag" + "net/url" + "os" + "regexp" + + squarejwt "gopkg.in/square/go-jose.v2/jwt" + + "github.com/hashicorp/errwrap" +) + 
+const ( + // PluginAutoMTLSEnv is used to ensure AutoMTLS is used. This will override + // setting a TLSProviderFunc for a plugin. + PluginAutoMTLSEnv = "VAULT_PLUGIN_AUTOMTLS_ENABLED" + + // PluginMetadataModeEnv is an ENV name used to disable TLS communication + // to bootstrap mounting plugins. + PluginMetadataModeEnv = "VAULT_PLUGIN_METADATA_MODE" + + // PluginUnwrapTokenEnv is the ENV name used to pass unwrap tokens to the + // plugin. + PluginUnwrapTokenEnv = "VAULT_UNWRAP_TOKEN" +) + +// sudoPaths is a map containing the paths that require a token's policy +// to have the "sudo" capability. The keys are the paths as strings, in +// the same format as they are returned by the OpenAPI spec. The values +// are the regular expressions that can be used to test whether a given +// path matches that path or not (useful specifically for the paths that +// contain templated fields.) +var sudoPaths = map[string]*regexp.Regexp{ + "/auth/token/accessors/": regexp.MustCompile(`^/auth/token/accessors/$`), + "/pki/root": regexp.MustCompile(`^/pki/root$`), + "/pki/root/sign-self-issued": regexp.MustCompile(`^/pki/root/sign-self-issued$`), + "/sys/audit": regexp.MustCompile(`^/sys/audit$`), + "/sys/audit/{path}": regexp.MustCompile(`^/sys/audit/.+$`), + "/sys/auth/{path}": regexp.MustCompile(`^/sys/auth/.+$`), + "/sys/auth/{path}/tune": regexp.MustCompile(`^/sys/auth/.+/tune$`), + "/sys/config/auditing/request-headers": regexp.MustCompile(`^/sys/config/auditing/request-headers$`), + "/sys/config/auditing/request-headers/{header}": regexp.MustCompile(`^/sys/config/auditing/request-headers/.+$`), + "/sys/config/cors": regexp.MustCompile(`^/sys/config/cors$`), + "/sys/config/ui/headers/": regexp.MustCompile(`^/sys/config/ui/headers/$`), + "/sys/config/ui/headers/{header}": regexp.MustCompile(`^/sys/config/ui/headers/.+$`), + "/sys/leases": regexp.MustCompile(`^/sys/leases$`), + "/sys/leases/lookup/": regexp.MustCompile(`^/sys/leases/lookup/$`), + "/sys/leases/lookup/{prefix}": regexp.MustCompile(`^/sys/leases/lookup/.+$`), + "/sys/leases/revoke-force/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-force/.+$`), + "/sys/leases/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-prefix/.+$`), + "/sys/plugins/catalog/{name}": regexp.MustCompile(`^/sys/plugins/catalog/[^/]+$`), + "/sys/plugins/catalog/{type}": regexp.MustCompile(`^/sys/plugins/catalog/[\w-]+$`), + "/sys/plugins/catalog/{type}/{name}": regexp.MustCompile(`^/sys/plugins/catalog/[\w-]+/[^/]+$`), + "/sys/raw": regexp.MustCompile(`^/sys/raw$`), + "/sys/raw/{path}": regexp.MustCompile(`^/sys/raw/.+$`), + "/sys/remount": regexp.MustCompile(`^/sys/remount$`), + "/sys/revoke-force/{prefix}": regexp.MustCompile(`^/sys/revoke-force/.+$`), + "/sys/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/revoke-prefix/.+$`), + "/sys/rotate": regexp.MustCompile(`^/sys/rotate$`), + + // enterprise-only paths + "/sys/replication/dr/primary/secondary-token": regexp.MustCompile(`^/sys/replication/dr/primary/secondary-token$`), + "/sys/replication/performance/primary/secondary-token": regexp.MustCompile(`^/sys/replication/performance/primary/secondary-token$`), + "/sys/replication/primary/secondary-token": regexp.MustCompile(`^/sys/replication/primary/secondary-token$`), + "/sys/replication/reindex": regexp.MustCompile(`^/sys/replication/reindex$`), + "/sys/storage/raft/snapshot-auto/config/": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/$`), + "/sys/storage/raft/snapshot-auto/config/{name}": 
regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/[^/]+$`), +} + +// PluginAPIClientMeta is a helper that plugins can use to configure TLS connections +// back to Vault. +type PluginAPIClientMeta struct { + // These are set by the command line flags. + flagCACert string + flagCAPath string + flagClientCert string + flagClientKey string + flagInsecure bool +} + +// FlagSet returns the flag set for configuring the TLS connection +func (f *PluginAPIClientMeta) FlagSet() *flag.FlagSet { + fs := flag.NewFlagSet("vault plugin settings", flag.ContinueOnError) + + fs.StringVar(&f.flagCACert, "ca-cert", "", "") + fs.StringVar(&f.flagCAPath, "ca-path", "", "") + fs.StringVar(&f.flagClientCert, "client-cert", "", "") + fs.StringVar(&f.flagClientKey, "client-key", "", "") + fs.BoolVar(&f.flagInsecure, "tls-skip-verify", false, "") + + return fs +} + +// GetTLSConfig will return a TLSConfig based off the values from the flags +func (f *PluginAPIClientMeta) GetTLSConfig() *TLSConfig { + // If we need custom TLS configuration, then set it + if f.flagCACert != "" || f.flagCAPath != "" || f.flagClientCert != "" || f.flagClientKey != "" || f.flagInsecure { + t := &TLSConfig{ + CACert: f.flagCACert, + CAPath: f.flagCAPath, + ClientCert: f.flagClientCert, + ClientKey: f.flagClientKey, + TLSServerName: "", + Insecure: f.flagInsecure, + } + + return t + } + + return nil +} + +// VaultPluginTLSProvider wraps VaultPluginTLSProviderContext using context.Background. +func VaultPluginTLSProvider(apiTLSConfig *TLSConfig) func() (*tls.Config, error) { + return VaultPluginTLSProviderContext(context.Background(), apiTLSConfig) +} + +// VaultPluginTLSProviderContext is run inside a plugin and retrieves the response +// wrapped TLS certificate from vault. It returns a configured TLS Config. 
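+//
+// A minimal wiring sketch (illustrative, not part of the upstream file; it
+// assumes the plugin is served with hashicorp/go-plugin, whose ServeConfig
+// accepts a TLSProvider function):
+//
+//	apiClientMeta := &api.PluginAPIClientMeta{}
+//	flags := apiClientMeta.FlagSet()
+//	flags.Parse(os.Args[1:])
+//	tlsProviderFunc := api.VaultPluginTLSProviderContext(ctx, apiClientMeta.GetTLSConfig())
+//	// hand tlsProviderFunc to the plugin's ServeConfig as its TLSProvider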
+func VaultPluginTLSProviderContext(ctx context.Context, apiTLSConfig *TLSConfig) func() (*tls.Config, error) { + if os.Getenv(PluginAutoMTLSEnv) == "true" || os.Getenv(PluginMetadataModeEnv) == "true" { + return nil + } + + return func() (*tls.Config, error) { + unwrapToken := os.Getenv(PluginUnwrapTokenEnv) + + parsedJWT, err := squarejwt.ParseSigned(unwrapToken) + if err != nil { + return nil, errwrap.Wrapf("error parsing wrapping token: {{err}}", err) + } + + allClaims := make(map[string]interface{}) + if err = parsedJWT.UnsafeClaimsWithoutVerification(&allClaims); err != nil { + return nil, errwrap.Wrapf("error parsing claims from wrapping token: {{err}}", err) + } + + addrClaimRaw, ok := allClaims["addr"] + if !ok { + return nil, errors.New("could not validate addr claim") + } + vaultAddr, ok := addrClaimRaw.(string) + if !ok { + return nil, errors.New("could not parse addr claim") + } + if vaultAddr == "" { + return nil, errors.New(`no vault api_addr found`) + } + + // Sanity check the value + if _, err := url.Parse(vaultAddr); err != nil { + return nil, errwrap.Wrapf("error parsing the vault api_addr: {{err}}", err) + } + + // Unwrap the token + clientConf := DefaultConfig() + clientConf.Address = vaultAddr + if apiTLSConfig != nil { + err := clientConf.ConfigureTLS(apiTLSConfig) + if err != nil { + return nil, errwrap.Wrapf("error configuring api client {{err}}", err) + } + } + client, err := NewClient(clientConf) + if err != nil { + return nil, errwrap.Wrapf("error during api client creation: {{err}}", err) + } + + // Reset token value to make sure nothing has been set by default + client.ClearToken() + + secret, err := client.Logical().UnwrapWithContext(ctx, unwrapToken) + if err != nil { + return nil, errwrap.Wrapf("error during token unwrap request: {{err}}", err) + } + if secret == nil { + return nil, errors.New("error during token unwrap request: secret is nil") + } + + // Retrieve and parse the server's certificate + serverCertBytesRaw, ok := secret.Data["ServerCert"].(string) + if !ok { + return nil, errors.New("error unmarshalling certificate") + } + + serverCertBytes, err := base64.StdEncoding.DecodeString(serverCertBytesRaw) + if err != nil { + return nil, errwrap.Wrapf("error parsing certificate: {{err}}", err) + } + + serverCert, err := x509.ParseCertificate(serverCertBytes) + if err != nil { + return nil, errwrap.Wrapf("error parsing certificate: {{err}}", err) + } + + // Retrieve and parse the server's private key + serverKeyB64, ok := secret.Data["ServerKey"].(string) + if !ok { + return nil, errors.New("error unmarshalling certificate") + } + + serverKeyRaw, err := base64.StdEncoding.DecodeString(serverKeyB64) + if err != nil { + return nil, errwrap.Wrapf("error parsing certificate: {{err}}", err) + } + + serverKey, err := x509.ParseECPrivateKey(serverKeyRaw) + if err != nil { + return nil, errwrap.Wrapf("error parsing certificate: {{err}}", err) + } + + // Add CA cert to the cert pool + caCertPool := x509.NewCertPool() + caCertPool.AddCert(serverCert) + + // Build a certificate object out of the server's cert and private key. 
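+	// Note that the same unwrapped certificate was added to caCertPool above,
+	// so it acts both as this plugin's identity and as the CA that each side
+	// of the mutual-TLS connection verifies against.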
+ cert := tls.Certificate{ + Certificate: [][]byte{serverCertBytes}, + PrivateKey: serverKey, + Leaf: serverCert, + } + + // Setup TLS config + tlsConfig := &tls.Config{ + ClientCAs: caCertPool, + RootCAs: caCertPool, + ClientAuth: tls.RequireAndVerifyClientCert, + // TLS 1.2 minimum + MinVersion: tls.VersionTLS12, + Certificates: []tls.Certificate{cert}, + ServerName: serverCert.Subject.CommonName, + } + + return tlsConfig, nil + } +} + +func SudoPaths() map[string]*regexp.Regexp { + return sudoPaths +} + +// Determine whether the given path requires the sudo capability +func IsSudoPath(path string) bool { + // Return early if the path is any of the non-templated sudo paths. + if _, ok := sudoPaths[path]; ok { + return true + } + + // Some sudo paths have templated fields in them. + // (e.g. /sys/revoke-prefix/{prefix}) + // The values in the sudoPaths map are actually regular expressions, + // so we can check if our path matches against them. + for _, sudoPathRegexp := range sudoPaths { + match := sudoPathRegexp.MatchString(path) + if match { + return true + } + } + + return false +} diff --git a/vendor/github.com/hashicorp/vault/api/request.go b/vendor/github.com/hashicorp/vault/api/request.go new file mode 100644 index 00000000000..1cbbc62f908 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/request.go @@ -0,0 +1,148 @@ +package api + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + + "github.com/hashicorp/vault/sdk/helper/consts" + + retryablehttp "github.com/hashicorp/go-retryablehttp" +) + +// Request is a raw request configuration structure used to initiate +// API requests to the Vault server. +type Request struct { + Method string + URL *url.URL + Host string + Params url.Values + Headers http.Header + ClientToken string + MFAHeaderVals []string + WrapTTL string + Obj interface{} + + // When possible, use BodyBytes as it is more efficient due to how the + // retry logic works + BodyBytes []byte + + // Fallback + Body io.Reader + BodySize int64 + + // Whether to request overriding soft-mandatory Sentinel policies (RGPs and + // EGPs). If set, the override flag will take effect for all policies + // evaluated during the request. + PolicyOverride bool +} + +// SetJSONBody is used to set a request body that is a JSON-encoded value. +func (r *Request) SetJSONBody(val interface{}) error { + buf, err := json.Marshal(val) + if err != nil { + return err + } + + r.Obj = val + r.BodyBytes = buf + return nil +} + +// ResetJSONBody is used to reset the body for a redirect +func (r *Request) ResetJSONBody() error { + if r.BodyBytes == nil { + return nil + } + return r.SetJSONBody(r.Obj) +} + +// DEPRECATED: ToHTTP turns this request into a valid *http.Request for use +// with the net/http package. 
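+// New code is expected to go through the retryablehttp-based path used
+// internally (see toRetryableHTTP below); this conversion appears to be kept
+// only for backwards compatibility.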
+func (r *Request) ToHTTP() (*http.Request, error) { + req, err := r.toRetryableHTTP() + if err != nil { + return nil, err + } + + switch { + case r.BodyBytes == nil && r.Body == nil: + // No body + + case r.BodyBytes != nil: + req.Request.Body = ioutil.NopCloser(bytes.NewReader(r.BodyBytes)) + + default: + if c, ok := r.Body.(io.ReadCloser); ok { + req.Request.Body = c + } else { + req.Request.Body = ioutil.NopCloser(r.Body) + } + } + + return req.Request, nil +} + +func (r *Request) toRetryableHTTP() (*retryablehttp.Request, error) { + // Encode the query parameters + r.URL.RawQuery = r.Params.Encode() + + // Create the HTTP request, defaulting to retryable + var req *retryablehttp.Request + + var err error + var body interface{} + + switch { + case r.BodyBytes == nil && r.Body == nil: + // No body + + case r.BodyBytes != nil: + // Use bytes, it's more efficient + body = r.BodyBytes + + default: + body = r.Body + } + + req, err = retryablehttp.NewRequest(r.Method, r.URL.RequestURI(), body) + if err != nil { + return nil, err + } + + req.URL.User = r.URL.User + req.URL.Scheme = r.URL.Scheme + req.URL.Host = r.URL.Host + req.Host = r.Host + + if r.Headers != nil { + for header, vals := range r.Headers { + for _, val := range vals { + req.Header.Add(header, val) + } + } + } + + if len(r.ClientToken) != 0 { + req.Header.Set(consts.AuthHeaderName, r.ClientToken) + } + + if len(r.WrapTTL) != 0 { + req.Header.Set("X-Vault-Wrap-TTL", r.WrapTTL) + } + + if len(r.MFAHeaderVals) != 0 { + for _, mfaHeaderVal := range r.MFAHeaderVals { + req.Header.Add("X-Vault-MFA", mfaHeaderVal) + } + } + + if r.PolicyOverride { + req.Header.Set("X-Vault-Policy-Override", "true") + } + + return req, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/response.go b/vendor/github.com/hashicorp/vault/api/response.go new file mode 100644 index 00000000000..9ce3d12aacc --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/response.go @@ -0,0 +1,133 @@ +package api + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/jsonutil" +) + +// Response is a raw response that wraps an HTTP response. +type Response struct { + *http.Response +} + +// DecodeJSON will decode the response body to a JSON structure. This +// will consume the response body, but will not close it. Close must +// still be called. +func (r *Response) DecodeJSON(out interface{}) error { + return jsonutil.DecodeJSONFromReader(r.Body, out) +} + +// Error returns an error response if there is one. If there is an error, +// this will fully consume the response body, but will not close it. The +// body must still be closed manually. +func (r *Response) Error() error { + // 200 to 399 are okay status codes. 429 is the code for health status of + // standby nodes, otherwise, 429 is treated as quota limit reached. + if (r.StatusCode >= 200 && r.StatusCode < 400) || (r.StatusCode == 429 && r.Request.URL.Path == "/v1/sys/health") { + return nil + } + + // We have an error. Let's copy the body into our own buffer first, + // so that if we can't decode JSON, we can at least copy it raw. 
+ bodyBuf := &bytes.Buffer{} + if _, err := io.Copy(bodyBuf, r.Body); err != nil { + return err + } + + r.Body.Close() + r.Body = ioutil.NopCloser(bodyBuf) + ns := r.Header.Get(consts.NamespaceHeaderName) + + // Build up the error object + respErr := &ResponseError{ + HTTPMethod: r.Request.Method, + URL: r.Request.URL.String(), + StatusCode: r.StatusCode, + NamespacePath: ns, + } + + // Decode the error response if we can. Note that we wrap the bodyBuf + // in a bytes.Reader here so that the JSON decoder doesn't move the + // read pointer for the original buffer. + var resp ErrorResponse + if err := jsonutil.DecodeJSON(bodyBuf.Bytes(), &resp); err != nil { + // Store the fact that we couldn't decode the errors + respErr.RawError = true + respErr.Errors = []string{bodyBuf.String()} + } else { + // Store the decoded errors + respErr.Errors = resp.Errors + } + + return respErr +} + +// ErrorResponse is the raw structure of errors when they're returned by the +// HTTP API. +type ErrorResponse struct { + Errors []string +} + +// ResponseError is the error returned when Vault responds with an error or +// non-success HTTP status code. If a request to Vault fails because of a +// network error a different error message will be returned. ResponseError gives +// access to the underlying errors and status code. +type ResponseError struct { + // HTTPMethod is the HTTP method for the request (PUT, GET, etc). + HTTPMethod string + + // URL is the URL of the request. + URL string + + // StatusCode is the HTTP status code. + StatusCode int + + // RawError marks that the underlying error messages returned by Vault were + // not parsable. The Errors slice will contain the raw response body as the + // first and only error string if this value is set to true. + RawError bool + + // Errors are the underlying errors returned by Vault. + Errors []string + + // Namespace path to be reported to the client if it is set to anything other + // than root + NamespacePath string +} + +// Error returns a human-readable error string for the response error. +func (r *ResponseError) Error() string { + errString := "Errors" + if r.RawError { + errString = "Raw Message" + } + + var ns string + if r.NamespacePath != "" && r.NamespacePath != "root/" { + ns = "Namespace: " + r.NamespacePath + "\n" + } + + var errBody bytes.Buffer + errBody.WriteString(fmt.Sprintf( + "Error making API request.\n\n"+ + ns+ + "URL: %s %s\n"+ + "Code: %d. %s:\n\n", + r.HTTPMethod, r.URL, r.StatusCode, errString)) + + if r.RawError && len(r.Errors) == 1 { + errBody.WriteString(r.Errors[0]) + } else { + for _, err := range r.Errors { + errBody.WriteString(fmt.Sprintf("* %s", err)) + } + } + + return errBody.String() +} diff --git a/vendor/github.com/hashicorp/vault/api/secret.go b/vendor/github.com/hashicorp/vault/api/secret.go new file mode 100644 index 00000000000..77e3ee9a9e0 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/secret.go @@ -0,0 +1,320 @@ +package api + +import ( + "bytes" + "fmt" + "io" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/logical" +) + +// Secret is the structure returned for every secret within Vault. 
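+//
+// For orientation, a response decoded into this structure looks roughly like
+// the following (an illustrative sketch, not an exhaustive example):
+//
+//	{
+//	  "request_id": "...",
+//	  "lease_id": "",
+//	  "lease_duration": 2764800,
+//	  "renewable": false,
+//	  "data": {"key": "value"},
+//	  "warnings": null
+//	}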
+type Secret struct { + // The request ID that generated this response + RequestID string `json:"request_id"` + + LeaseID string `json:"lease_id"` + LeaseDuration int `json:"lease_duration"` + Renewable bool `json:"renewable"` + + // Data is the actual contents of the secret. The format of the data + // is arbitrary and up to the secret backend. + Data map[string]interface{} `json:"data"` + + // Warnings contains any warnings related to the operation. These + // are not issues that caused the command to fail, but that the + // client should be aware of. + Warnings []string `json:"warnings"` + + // Auth, if non-nil, means that there was authentication information + // attached to this response. + Auth *SecretAuth `json:"auth,omitempty"` + + // WrapInfo, if non-nil, means that the initial response was wrapped in the + // cubbyhole of the given token (which has a TTL of the given number of + // seconds) + WrapInfo *SecretWrapInfo `json:"wrap_info,omitempty"` +} + +// TokenID returns the standardized token ID (token) for the given secret. +func (s *Secret) TokenID() (string, error) { + if s == nil { + return "", nil + } + + if s.Auth != nil && len(s.Auth.ClientToken) > 0 { + return s.Auth.ClientToken, nil + } + + if s.Data == nil || s.Data["id"] == nil { + return "", nil + } + + id, ok := s.Data["id"].(string) + if !ok { + return "", fmt.Errorf("token found but in the wrong format") + } + + return id, nil +} + +// TokenAccessor returns the standardized token accessor for the given secret. +// If the secret is nil or does not contain an accessor, this returns the empty +// string. +func (s *Secret) TokenAccessor() (string, error) { + if s == nil { + return "", nil + } + + if s.Auth != nil && len(s.Auth.Accessor) > 0 { + return s.Auth.Accessor, nil + } + + if s.Data == nil || s.Data["accessor"] == nil { + return "", nil + } + + accessor, ok := s.Data["accessor"].(string) + if !ok { + return "", fmt.Errorf("token found but in the wrong format") + } + + return accessor, nil +} + +// TokenRemainingUses returns the standardized remaining uses for the given +// secret. If the secret is nil or does not contain the "num_uses", this +// returns -1. On error, this will return -1 and a non-nil error. +func (s *Secret) TokenRemainingUses() (int, error) { + if s == nil || s.Data == nil || s.Data["num_uses"] == nil { + return -1, nil + } + + return parseutil.SafeParseInt(s.Data["num_uses"]) +} + +// TokenPolicies returns the standardized list of policies for the given secret. +// If the secret is nil or does not contain any policies, this returns nil. It +// also populates the secret's Auth info with identity/token policy info. 
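+//
+// Note that the "policies" and "identity_policies" values in Data may arrive
+// either as []string or as []interface{} of strings depending on how the
+// response was decoded; both shapes are handled below.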
+func (s *Secret) TokenPolicies() ([]string, error) { + if s == nil { + return nil, nil + } + + if s.Auth != nil && len(s.Auth.Policies) > 0 { + return s.Auth.Policies, nil + } + + if s.Data == nil || s.Data["policies"] == nil { + return nil, nil + } + + var tokenPolicies []string + + // Token policies + { + _, ok := s.Data["policies"] + if !ok { + goto TOKEN_DONE + } + + sList, ok := s.Data["policies"].([]string) + if ok { + tokenPolicies = sList + goto TOKEN_DONE + } + + list, ok := s.Data["policies"].([]interface{}) + if !ok { + return nil, fmt.Errorf("unable to convert token policies to expected format") + } + for _, v := range list { + p, ok := v.(string) + if !ok { + return nil, fmt.Errorf("unable to convert policy %v to string", v) + } + tokenPolicies = append(tokenPolicies, p) + } + } + +TOKEN_DONE: + var identityPolicies []string + + // Identity policies + { + _, ok := s.Data["identity_policies"] + if !ok { + goto DONE + } + + sList, ok := s.Data["identity_policies"].([]string) + if ok { + identityPolicies = sList + goto DONE + } + + list, ok := s.Data["identity_policies"].([]interface{}) + if !ok { + return nil, fmt.Errorf("unable to convert identity policies to expected format") + } + for _, v := range list { + p, ok := v.(string) + if !ok { + return nil, fmt.Errorf("unable to convert policy %v to string", v) + } + identityPolicies = append(identityPolicies, p) + } + } + +DONE: + + if s.Auth == nil { + s.Auth = &SecretAuth{} + } + + policies := append(tokenPolicies, identityPolicies...) + + s.Auth.TokenPolicies = tokenPolicies + s.Auth.IdentityPolicies = identityPolicies + s.Auth.Policies = policies + + return policies, nil +} + +// TokenMetadata returns the map of metadata associated with this token, if any +// exists. If the secret is nil or does not contain the "metadata" key, this +// returns nil. +func (s *Secret) TokenMetadata() (map[string]string, error) { + if s == nil { + return nil, nil + } + + if s.Auth != nil && len(s.Auth.Metadata) > 0 { + return s.Auth.Metadata, nil + } + + if s.Data == nil || (s.Data["metadata"] == nil && s.Data["meta"] == nil) { + return nil, nil + } + + data, ok := s.Data["metadata"].(map[string]interface{}) + if !ok { + data, ok = s.Data["meta"].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("unable to convert metadata field to expected format") + } + } + + metadata := make(map[string]string, len(data)) + for k, v := range data { + typed, ok := v.(string) + if !ok { + return nil, fmt.Errorf("unable to convert metadata value %v to string", v) + } + metadata[k] = typed + } + + return metadata, nil +} + +// TokenIsRenewable returns the standardized token renewability for the given +// secret. If the secret is nil or does not contain the "renewable" key, this +// returns false. +func (s *Secret) TokenIsRenewable() (bool, error) { + if s == nil { + return false, nil + } + + if s.Auth != nil && s.Auth.Renewable { + return s.Auth.Renewable, nil + } + + if s.Data == nil || s.Data["renewable"] == nil { + return false, nil + } + + renewable, err := parseutil.ParseBool(s.Data["renewable"]) + if err != nil { + return false, errwrap.Wrapf("could not convert renewable value to a boolean: {{err}}", err) + } + + return renewable, nil +} + +// TokenTTL returns the standardized remaining token TTL for the given secret. +// If the secret is nil or does not contain a TTL, this returns 0. 
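+//
+// The "ttl" value may be encoded as a JSON number of seconds or as a duration
+// string; parseutil.ParseDurationSecond below accepts either form.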
+func (s *Secret) TokenTTL() (time.Duration, error) {
+	if s == nil {
+		return 0, nil
+	}
+
+	if s.Auth != nil && s.Auth.LeaseDuration > 0 {
+		return time.Duration(s.Auth.LeaseDuration) * time.Second, nil
+	}
+
+	if s.Data == nil || s.Data["ttl"] == nil {
+		return 0, nil
+	}
+
+	ttl, err := parseutil.ParseDurationSecond(s.Data["ttl"])
+	if err != nil {
+		return 0, err
+	}
+
+	return ttl, nil
+}
+
+// SecretWrapInfo contains wrapping information if we have it. If what is
+// contained is an authentication token, the accessor for the token will be
+// available in WrappedAccessor.
+type SecretWrapInfo struct {
+	Token           string    `json:"token"`
+	Accessor        string    `json:"accessor"`
+	TTL             int       `json:"ttl"`
+	CreationTime    time.Time `json:"creation_time"`
+	CreationPath    string    `json:"creation_path"`
+	WrappedAccessor string    `json:"wrapped_accessor"`
+}
+
+// SecretAuth is the structure containing auth information if we have it.
+type SecretAuth struct {
+	ClientToken      string            `json:"client_token"`
+	Accessor         string            `json:"accessor"`
+	Policies         []string          `json:"policies"`
+	TokenPolicies    []string          `json:"token_policies"`
+	IdentityPolicies []string          `json:"identity_policies"`
+	Metadata         map[string]string `json:"metadata"`
+	Orphan           bool              `json:"orphan"`
+	EntityID         string            `json:"entity_id"`
+
+	LeaseDuration int  `json:"lease_duration"`
+	Renewable     bool `json:"renewable"`
+
+	MFARequirement *logical.MFARequirement `json:"mfa_requirement"`
+}
+
+// ParseSecret is used to parse a secret value from JSON from an io.Reader.
+func ParseSecret(r io.Reader) (*Secret, error) {
+	// First read the data into a buffer. Not super efficient but we want to
+	// know if we actually have a body or not.
+	var buf bytes.Buffer
+	_, err := buf.ReadFrom(r)
+	if err != nil {
+		return nil, err
+	}
+	if buf.Len() == 0 {
+		return nil, nil
+	}
+
+	// Then decode the JSON into the Secret structure
+	var secret Secret
+	if err := jsonutil.DecodeJSONFromReader(&buf, &secret); err != nil {
+		return nil, err
+	}
+
+	return &secret, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/api/ssh.go b/vendor/github.com/hashicorp/vault/api/ssh.go
new file mode 100644
index 00000000000..b832e274829
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/ssh.go
@@ -0,0 +1,75 @@
+package api
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+)
+
+// SSH is used to return a client to invoke operations on the SSH backend.
+type SSH struct {
+	c          *Client
+	MountPoint string
+}
+
+// SSH returns the client for SSH-backend API calls.
+func (c *Client) SSH() *SSH {
+	return c.SSHWithMountPoint(SSHHelperDefaultMountPoint)
+}
+
+// SSHWithMountPoint returns the client with a specific SSH mount point.
+func (c *Client) SSHWithMountPoint(mountPoint string) *SSH {
+	return &SSH{
+		c:          c,
+		MountPoint: mountPoint,
+	}
+}
+
+// Credential wraps CredentialWithContext using context.Background.
+func (c *SSH) Credential(role string, data map[string]interface{}) (*Secret, error) {
+	return c.CredentialWithContext(context.Background(), role, data)
+}
+
+// CredentialWithContext invokes the SSH backend API to create a credential to establish an SSH session.
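+//
+// A minimal usage sketch (the role name and data keys are illustrative, not
+// fixed by this API):
+//
+//	secret, err := client.SSH().Credential("otp_key_role", map[string]interface{}{
+//		"ip": "10.0.0.1",
+//	})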
+func (c *SSH) CredentialWithContext(ctx context.Context, role string, data map[string]interface{}) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, fmt.Sprintf("/v1/%s/creds/%s", c.MountPoint, role)) + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +// SignKey wraps SignKeyWithContext using context.Background. +func (c *SSH) SignKey(role string, data map[string]interface{}) (*Secret, error) { + return c.SignKeyWithContext(context.Background(), role, data) +} + +// SignKeyWithContext signs the given public key and returns a signed public key to pass +// along with the SSH request. +func (c *SSH) SignKeyWithContext(ctx context.Context, role string, data map[string]interface{}) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, fmt.Sprintf("/v1/%s/sign/%s", c.MountPoint, role)) + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} diff --git a/vendor/github.com/hashicorp/vault/api/ssh_agent.go b/vendor/github.com/hashicorp/vault/api/ssh_agent.go new file mode 100644 index 00000000000..03fe2bea53e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/ssh_agent.go @@ -0,0 +1,247 @@ +package api + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" + "os" + + "github.com/hashicorp/errwrap" + cleanhttp "github.com/hashicorp/go-cleanhttp" + multierror "github.com/hashicorp/go-multierror" + rootcerts "github.com/hashicorp/go-rootcerts" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/vault/sdk/helper/hclutil" + "github.com/mitchellh/mapstructure" +) + +const ( + // SSHHelperDefaultMountPoint is the default path at which SSH backend will be + // mounted in the Vault server. + SSHHelperDefaultMountPoint = "ssh" + + // VerifyEchoRequest is the echo request message sent as OTP by the helper. + VerifyEchoRequest = "verify-echo-request" + + // VerifyEchoResponse is the echo response message sent as a response to OTP + // matching echo request. + VerifyEchoResponse = "verify-echo-response" +) + +// SSHHelper is a structure representing a vault-ssh-helper which can talk to vault server +// in order to verify the OTP entered by the user. It contains the path at which +// SSH backend is mounted at the server. +type SSHHelper struct { + c *Client + MountPoint string +} + +// SSHVerifyResponse is a structure representing the fields in Vault server's +// response. +type SSHVerifyResponse struct { + // Usually empty. If the request OTP is echo request message, this will + // be set to the corresponding echo response message. 
+ Message string `json:"message" mapstructure:"message"` + + // Username associated with the OTP + Username string `json:"username" mapstructure:"username"` + + // IP associated with the OTP + IP string `json:"ip" mapstructure:"ip"` + + // Name of the role against which the OTP was issued + RoleName string `json:"role_name" mapstructure:"role_name"` +} + +// SSHHelperConfig is a structure which represents the entries from the vault-ssh-helper's configuration file. +type SSHHelperConfig struct { + VaultAddr string `hcl:"vault_addr"` + SSHMountPoint string `hcl:"ssh_mount_point"` + Namespace string `hcl:"namespace"` + CACert string `hcl:"ca_cert"` + CAPath string `hcl:"ca_path"` + AllowedCidrList string `hcl:"allowed_cidr_list"` + AllowedRoles string `hcl:"allowed_roles"` + TLSSkipVerify bool `hcl:"tls_skip_verify"` + TLSServerName string `hcl:"tls_server_name"` +} + +// SetTLSParameters sets the TLS parameters for this SSH agent. +func (c *SSHHelperConfig) SetTLSParameters(clientConfig *Config, certPool *x509.CertPool) { + tlsConfig := &tls.Config{ + InsecureSkipVerify: c.TLSSkipVerify, + MinVersion: tls.VersionTLS12, + RootCAs: certPool, + ServerName: c.TLSServerName, + } + + transport := cleanhttp.DefaultTransport() + transport.TLSClientConfig = tlsConfig + clientConfig.HttpClient.Transport = transport +} + +// Returns true if any of the following conditions are true: +// - CA cert is configured +// - CA path is configured +// - configured to skip certificate verification +// - TLS server name is configured +func (c *SSHHelperConfig) shouldSetTLSParameters() bool { + return c.CACert != "" || c.CAPath != "" || c.TLSServerName != "" || c.TLSSkipVerify +} + +// NewClient returns a new client for the configuration. This client will be used by the +// vault-ssh-helper to communicate with Vault server and verify the OTP entered by user. +// If the configuration supplies Vault SSL certificates, then the client will +// have TLS configured in its transport. +func (c *SSHHelperConfig) NewClient() (*Client, error) { + // Creating a default client configuration for communicating with vault server. + clientConfig := DefaultConfig() + + // Pointing the client to the actual address of vault server. + clientConfig.Address = c.VaultAddr + + // Check if certificates are provided via config file. + if c.shouldSetTLSParameters() { + rootConfig := &rootcerts.Config{ + CAFile: c.CACert, + CAPath: c.CAPath, + } + certPool, err := rootcerts.LoadCACerts(rootConfig) + if err != nil { + return nil, err + } + // Enable TLS on the HTTP client information + c.SetTLSParameters(clientConfig, certPool) + } + + // Creating the client object for the given configuration + client, err := NewClient(clientConfig) + if err != nil { + return nil, err + } + + // Configure namespace + if c.Namespace != "" { + client.SetNamespace(c.Namespace) + } + + return client, nil +} + +// LoadSSHHelperConfig loads ssh-helper's configuration from the file and populates the corresponding +// in-memory structure. +// +// Vault address is a required parameter. +// Mount point defaults to "ssh". +func LoadSSHHelperConfig(path string) (*SSHHelperConfig, error) { + contents, err := ioutil.ReadFile(path) + if err != nil && !os.IsNotExist(err) { + return nil, multierror.Prefix(err, "ssh_helper:") + } + return ParseSSHHelperConfig(string(contents)) +} + +// ParseSSHHelperConfig parses the given contents as a string for the SSHHelper +// configuration. 
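+//
+// An illustrative configuration (keys drawn from the valid set checked
+// below):
+//
+//	vault_addr = "https://vault.example.com:8200"
+//	ssh_mount_point = "ssh"
+//	ca_cert = "/etc/vault-ssh-helper.d/vault.crt"
+//	tls_skip_verify = false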
+func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) {
+	root, err := hcl.Parse(string(contents))
+	if err != nil {
+		return nil, errwrap.Wrapf("error parsing config: {{err}}", err)
+	}
+
+	list, ok := root.Node.(*ast.ObjectList)
+	if !ok {
+		return nil, fmt.Errorf("error parsing config: file doesn't contain a root object")
+	}
+
+	valid := []string{
+		"vault_addr",
+		"ssh_mount_point",
+		"namespace",
+		"ca_cert",
+		"ca_path",
+		"allowed_cidr_list",
+		"allowed_roles",
+		"tls_skip_verify",
+		"tls_server_name",
+	}
+	if err := hclutil.CheckHCLKeys(list, valid); err != nil {
+		return nil, multierror.Prefix(err, "ssh_helper:")
+	}
+
+	var c SSHHelperConfig
+	c.SSHMountPoint = SSHHelperDefaultMountPoint
+	if err := hcl.DecodeObject(&c, list); err != nil {
+		return nil, multierror.Prefix(err, "ssh_helper:")
+	}
+
+	if c.VaultAddr == "" {
+		return nil, fmt.Errorf(`missing config "vault_addr"`)
+	}
+	return &c, nil
+}
+
+// SSHHelper creates an SSHHelper object which can talk to Vault server with SSH backend
+// mounted at default path ("ssh").
+func (c *Client) SSHHelper() *SSHHelper {
+	return c.SSHHelperWithMountPoint(SSHHelperDefaultMountPoint)
+}
+
+// SSHHelperWithMountPoint creates an SSHHelper object which can talk to Vault server with SSH backend
+// mounted at a specific mount point.
+func (c *Client) SSHHelperWithMountPoint(mountPoint string) *SSHHelper {
+	return &SSHHelper{
+		c:          c,
+		MountPoint: mountPoint,
+	}
+}
+
+// Verify verifies whether the OTP provided by the user is present in the Vault
+// server. The response will contain the IP address and username associated
+// with the OTP. In case the OTP matches the echo request message, instead of
+// searching an entry for the OTP, an echo response message is returned. This
+// feature is used by ssh-helper to verify if it's configured correctly.
+func (c *SSHHelper) Verify(otp string) (*SSHVerifyResponse, error) {
+	return c.VerifyWithContext(context.Background(), otp)
+}
+
+// VerifyWithContext is the same as Verify but with a custom context.
+func (c *SSHHelper) VerifyWithContext(ctx context.Context, otp string) (*SSHVerifyResponse, error) {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+	defer cancelFunc()
+
+	data := map[string]interface{}{
+		"otp": otp,
+	}
+	verifyPath := fmt.Sprintf("/v1/%s/verify", c.MountPoint)
+	r := c.c.NewRequest(http.MethodPut, verifyPath)
+	if err := r.SetJSONBody(data); err != nil {
+		return nil, err
+	}
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	secret, err := ParseSecret(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	if secret.Data == nil {
+		return nil, nil
+	}
+
+	var verifyResp SSHVerifyResponse
+	err = mapstructure.Decode(secret.Data, &verifyResp)
+	if err != nil {
+		return nil, err
+	}
+	return &verifyResp, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/api/sys.go b/vendor/github.com/hashicorp/vault/api/sys.go
new file mode 100644
index 00000000000..5fb111887c0
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys.go
@@ -0,0 +1,11 @@
+package api
+
+// Sys is used to perform system-related operations on Vault.
+type Sys struct {
+	c *Client
+}
+
+// Sys is used to return the client for sys-related API calls.
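+//
+// The returned handle is the entry point for the helpers defined in the
+// sys_*.go files of this package, e.g. client.Sys().Health() or
+// client.Sys().ListAuth().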
+func (c *Client) Sys() *Sys { + return &Sys{c: c} +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_audit.go b/vendor/github.com/hashicorp/vault/api/sys_audit.go new file mode 100644 index 00000000000..82d9aab0b7a --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_audit.go @@ -0,0 +1,156 @@ +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) AuditHash(path string, input string) (string, error) { + return c.AuditHashWithContext(context.Background(), path, input) +} + +func (c *Sys) AuditHashWithContext(ctx context.Context, path string, input string) (string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "input": input, + } + + r := c.c.NewRequest(http.MethodPut, fmt.Sprintf("/v1/sys/audit-hash/%s", path)) + if err := r.SetJSONBody(body); err != nil { + return "", err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return "", err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return "", err + } + if secret == nil || secret.Data == nil { + return "", errors.New("data from server response is empty") + } + + hash, ok := secret.Data["hash"] + if !ok { + return "", errors.New("hash not found in response data") + } + hashStr, ok := hash.(string) + if !ok { + return "", errors.New("could not parse hash in response data") + } + + return hashStr, nil +} + +func (c *Sys) ListAudit() (map[string]*Audit, error) { + return c.ListAuditWithContext(context.Background()) +} + +func (c *Sys) ListAuditWithContext(ctx context.Context) (map[string]*Audit, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/audit") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + mounts := map[string]*Audit{} + err = mapstructure.Decode(secret.Data, &mounts) + if err != nil { + return nil, err + } + + return mounts, nil +} + +// DEPRECATED: Use EnableAuditWithOptions instead +func (c *Sys) EnableAudit( + path string, auditType string, desc string, opts map[string]string, +) error { + return c.EnableAuditWithOptions(path, &EnableAuditOptions{ + Type: auditType, + Description: desc, + Options: opts, + }) +} + +func (c *Sys) EnableAuditWithOptions(path string, options *EnableAuditOptions) error { + return c.EnableAuditWithOptionsWithContext(context.Background(), path, options) +} + +func (c *Sys) EnableAuditWithOptionsWithContext(ctx context.Context, path string, options *EnableAuditOptions) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, fmt.Sprintf("/v1/sys/audit/%s", path)) + if err := r.SetJSONBody(options); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) DisableAudit(path string) error { + return c.DisableAuditWithContext(context.Background(), path) +} + +func (c *Sys) DisableAuditWithContext(ctx context.Context, path string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := 
c.c.NewRequest(http.MethodDelete, fmt.Sprintf("/v1/sys/audit/%s", path))
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
+
+	if err == nil {
+		defer resp.Body.Close()
+	}
+	return err
+}
+
+// Structures for the requests/responses are all down here. They aren't
+// individually documented because they map almost directly to the raw HTTP API
+// documentation. Please refer to that documentation for more details.
+
+type EnableAuditOptions struct {
+	Type        string            `json:"type" mapstructure:"type"`
+	Description string            `json:"description" mapstructure:"description"`
+	Options     map[string]string `json:"options" mapstructure:"options"`
+	Local       bool              `json:"local" mapstructure:"local"`
+}
+
+type Audit struct {
+	Type        string            `json:"type" mapstructure:"type"`
+	Description string            `json:"description" mapstructure:"description"`
+	Options     map[string]string `json:"options" mapstructure:"options"`
+	Local       bool              `json:"local" mapstructure:"local"`
+	Path        string            `json:"path" mapstructure:"path"`
+}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_auth.go b/vendor/github.com/hashicorp/vault/api/sys_auth.go
new file mode 100644
index 00000000000..238bd5e468a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys_auth.go
@@ -0,0 +1,98 @@
+package api
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+
+	"github.com/mitchellh/mapstructure"
+)
+
+func (c *Sys) ListAuth() (map[string]*AuthMount, error) {
+	return c.ListAuthWithContext(context.Background())
+}
+
+func (c *Sys) ListAuthWithContext(ctx context.Context) (map[string]*AuthMount, error) {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+	defer cancelFunc()
+
+	r := c.c.NewRequest(http.MethodGet, "/v1/sys/auth")
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	secret, err := ParseSecret(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+	if secret == nil || secret.Data == nil {
+		return nil, errors.New("data from server response is empty")
+	}
+
+	mounts := map[string]*AuthMount{}
+	err = mapstructure.Decode(secret.Data, &mounts)
+	if err != nil {
+		return nil, err
+	}
+
+	return mounts, nil
+}
+
+// DEPRECATED: Use EnableAuthWithOptions instead
+func (c *Sys) EnableAuth(path, authType, desc string) error {
+	return c.EnableAuthWithOptions(path, &EnableAuthOptions{
+		Type:        authType,
+		Description: desc,
+	})
+}
+
+func (c *Sys) EnableAuthWithOptions(path string, options *EnableAuthOptions) error {
+	return c.EnableAuthWithOptionsWithContext(context.Background(), path, options)
+}
+
+func (c *Sys) EnableAuthWithOptionsWithContext(ctx context.Context, path string, options *EnableAuthOptions) error {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+	defer cancelFunc()
+
+	r := c.c.NewRequest(http.MethodPost, fmt.Sprintf("/v1/sys/auth/%s", path))
+	if err := r.SetJSONBody(options); err != nil {
+		return err
+	}
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return nil
+}
+
+func (c *Sys) DisableAuth(path string) error {
+	return c.DisableAuthWithContext(context.Background(), path)
+}
+
+func (c *Sys) DisableAuthWithContext(ctx context.Context, path string) error {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+	defer cancelFunc()
+
+	r := c.c.NewRequest(http.MethodDelete, fmt.Sprintf("/v1/sys/auth/%s", path))
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
+	if err == nil {
+		defer resp.Body.Close()
+	}
+	return err
+}
+
+// Rather than duplicate, we
can use modern Go's type aliasing +type ( + EnableAuthOptions = MountInput + AuthConfigInput = MountConfigInput + AuthMount = MountOutput + AuthConfigOutput = MountConfigOutput +) diff --git a/vendor/github.com/hashicorp/vault/api/sys_capabilities.go b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go new file mode 100644 index 00000000000..af306a07f31 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go @@ -0,0 +1,77 @@ +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) CapabilitiesSelf(path string) ([]string, error) { + return c.CapabilitiesSelfWithContext(context.Background(), path) +} + +func (c *Sys) CapabilitiesSelfWithContext(ctx context.Context, path string) ([]string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + return c.CapabilitiesWithContext(ctx, c.c.Token(), path) +} + +func (c *Sys) Capabilities(token, path string) ([]string, error) { + return c.CapabilitiesWithContext(context.Background(), token, path) +} + +func (c *Sys) CapabilitiesWithContext(ctx context.Context, token, path string) ([]string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]string{ + "token": token, + "path": path, + } + + reqPath := "/v1/sys/capabilities" + if token == c.c.Token() { + reqPath = fmt.Sprintf("%s-self", reqPath) + } + + r := c.c.NewRequest(http.MethodPost, reqPath) + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var res []string + err = mapstructure.Decode(secret.Data[path], &res) + if err != nil { + return nil, err + } + + if len(res) == 0 { + _, ok := secret.Data["capabilities"] + if ok { + err = mapstructure.Decode(secret.Data["capabilities"], &res) + if err != nil { + return nil, err + } + } + } + + return res, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_config_cors.go b/vendor/github.com/hashicorp/vault/api/sys_config_cors.go new file mode 100644 index 00000000000..1e2cda4f48c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_config_cors.go @@ -0,0 +1,91 @@ +package api + +import ( + "context" + "errors" + "net/http" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) CORSStatus() (*CORSResponse, error) { + return c.CORSStatusWithContext(context.Background()) +} + +func (c *Sys) CORSStatusWithContext(ctx context.Context) (*CORSResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/config/cors") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result CORSResponse + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +func (c *Sys) ConfigureCORS(req *CORSRequest) error { + return c.ConfigureCORSWithContext(context.Background(), req) +} + +func 
(c *Sys) ConfigureCORSWithContext(ctx context.Context, req *CORSRequest) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/config/cors") + if err := r.SetJSONBody(req); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) DisableCORS() error { + return c.DisableCORSWithContext(context.Background()) +} + +func (c *Sys) DisableCORSWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/sys/config/cors") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +type CORSRequest struct { + AllowedOrigins []string `json:"allowed_origins" mapstructure:"allowed_origins"` + AllowedHeaders []string `json:"allowed_headers" mapstructure:"allowed_headers"` + Enabled bool `json:"enabled" mapstructure:"enabled"` +} + +type CORSResponse struct { + AllowedOrigins []string `json:"allowed_origins" mapstructure:"allowed_origins"` + AllowedHeaders []string `json:"allowed_headers" mapstructure:"allowed_headers"` + Enabled bool `json:"enabled" mapstructure:"enabled"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_generate_root.go b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go new file mode 100644 index 00000000000..096cadb793d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go @@ -0,0 +1,195 @@ +package api + +import ( + "context" + "net/http" +) + +func (c *Sys) GenerateRootStatus() (*GenerateRootStatusResponse, error) { + return c.GenerateRootStatusWithContext(context.Background()) +} + +func (c *Sys) GenerateDROperationTokenStatus() (*GenerateRootStatusResponse, error) { + return c.GenerateDROperationTokenStatusWithContext(context.Background()) +} + +func (c *Sys) GenerateRecoveryOperationTokenStatus() (*GenerateRootStatusResponse, error) { + return c.GenerateRecoveryOperationTokenStatusWithContext(context.Background()) +} + +func (c *Sys) GenerateRootStatusWithContext(ctx context.Context) (*GenerateRootStatusResponse, error) { + return c.generateRootStatusCommonWithContext(ctx, "/v1/sys/generate-root/attempt") +} + +func (c *Sys) GenerateDROperationTokenStatusWithContext(ctx context.Context) (*GenerateRootStatusResponse, error) { + return c.generateRootStatusCommonWithContext(ctx, "/v1/sys/replication/dr/secondary/generate-operation-token/attempt") +} + +func (c *Sys) GenerateRecoveryOperationTokenStatusWithContext(ctx context.Context) (*GenerateRootStatusResponse, error) { + return c.generateRootStatusCommonWithContext(ctx, "/v1/sys/generate-recovery-token/attempt") +} + +func (c *Sys) generateRootStatusCommonWithContext(ctx context.Context, path string) (*GenerateRootStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, path) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result GenerateRootStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) GenerateRootInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.GenerateRootInitWithContext(context.Background(), otp, pgpKey) +} + +func (c *Sys) GenerateDROperationTokenInit(otp, pgpKey string) (*GenerateRootStatusResponse, 
error) { + return c.GenerateDROperationTokenInitWithContext(context.Background(), otp, pgpKey) +} + +func (c *Sys) GenerateRecoveryOperationTokenInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.GenerateRecoveryOperationTokenInitWithContext(context.Background(), otp, pgpKey) +} + +func (c *Sys) GenerateRootInitWithContext(ctx context.Context, otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.generateRootInitCommonWithContext(ctx, "/v1/sys/generate-root/attempt", otp, pgpKey) +} + +func (c *Sys) GenerateDROperationTokenInitWithContext(ctx context.Context, otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.generateRootInitCommonWithContext(ctx, "/v1/sys/replication/dr/secondary/generate-operation-token/attempt", otp, pgpKey) +} + +func (c *Sys) GenerateRecoveryOperationTokenInitWithContext(ctx context.Context, otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.generateRootInitCommonWithContext(ctx, "/v1/sys/generate-recovery-token/attempt", otp, pgpKey) +} + +func (c *Sys) generateRootInitCommonWithContext(ctx context.Context, path, otp, pgpKey string) (*GenerateRootStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "otp": otp, + "pgp_key": pgpKey, + } + + r := c.c.NewRequest(http.MethodPut, path) + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result GenerateRootStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) GenerateRootCancel() error { + return c.GenerateRootCancelWithContext(context.Background()) +} + +func (c *Sys) GenerateDROperationTokenCancel() error { + return c.GenerateDROperationTokenCancelWithContext(context.Background()) +} + +func (c *Sys) GenerateRecoveryOperationTokenCancel() error { + return c.GenerateRecoveryOperationTokenCancelWithContext(context.Background()) +} + +func (c *Sys) GenerateRootCancelWithContext(ctx context.Context) error { + return c.generateRootCancelCommonWithContext(ctx, "/v1/sys/generate-root/attempt") +} + +func (c *Sys) GenerateDROperationTokenCancelWithContext(ctx context.Context) error { + return c.generateRootCancelCommonWithContext(ctx, "/v1/sys/replication/dr/secondary/generate-operation-token/attempt") +} + +func (c *Sys) GenerateRecoveryOperationTokenCancelWithContext(ctx context.Context) error { + return c.generateRootCancelCommonWithContext(ctx, "/v1/sys/generate-recovery-token/attempt") +} + +func (c *Sys) generateRootCancelCommonWithContext(ctx context.Context, path string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, path) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) GenerateRootUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.GenerateRootUpdateWithContext(context.Background(), shard, nonce) +} + +func (c *Sys) GenerateDROperationTokenUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.GenerateDROperationTokenUpdateWithContext(context.Background(), shard, nonce) +} + +func (c *Sys) GenerateRecoveryOperationTokenUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.GenerateRecoveryOperationTokenUpdateWithContext(context.Background(), shard, nonce) +} + 
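+// A sketch of the overall flow built from these helpers (key-share handling
+// and error checking are illustrative):
+//
+//	status, _ := client.Sys().GenerateRootInit(otp, pgpKey)
+//	for _, shard := range keyShares {
+//		status, _ = client.Sys().GenerateRootUpdate(shard, status.Nonce)
+//	}
+//	// once status.Complete is true, status.EncodedToken carries the result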
+func (c *Sys) GenerateRootUpdateWithContext(ctx context.Context, shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.generateRootUpdateCommonWithContext(ctx, "/v1/sys/generate-root/update", shard, nonce) +} + +func (c *Sys) GenerateDROperationTokenUpdateWithContext(ctx context.Context, shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.generateRootUpdateCommonWithContext(ctx, "/v1/sys/replication/dr/secondary/generate-operation-token/update", shard, nonce) +} + +func (c *Sys) GenerateRecoveryOperationTokenUpdateWithContext(ctx context.Context, shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.generateRootUpdateCommonWithContext(ctx, "/v1/sys/generate-recovery-token/update", shard, nonce) +} + +func (c *Sys) generateRootUpdateCommonWithContext(ctx context.Context, path, shard, nonce string) (*GenerateRootStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest(http.MethodPut, path) + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result GenerateRootStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type GenerateRootStatusResponse struct { + Nonce string `json:"nonce"` + Started bool `json:"started"` + Progress int `json:"progress"` + Required int `json:"required"` + Complete bool `json:"complete"` + EncodedToken string `json:"encoded_token"` + EncodedRootToken string `json:"encoded_root_token"` + PGPFingerprint string `json:"pgp_fingerprint"` + OTP string `json:"otp"` + OTPLength int `json:"otp_length"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_hastatus.go b/vendor/github.com/hashicorp/vault/api/sys_hastatus.go new file mode 100644 index 00000000000..d89d59651a9 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_hastatus.go @@ -0,0 +1,43 @@ +package api + +import ( + "context" + "net/http" + "time" +) + +func (c *Sys) HAStatus() (*HAStatusResponse, error) { + return c.HAStatusWithContext(context.Background()) +} + +func (c *Sys) HAStatusWithContext(ctx context.Context) (*HAStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/ha-status") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result HAStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type HAStatusResponse struct { + Nodes []HANode +} + +type HANode struct { + Hostname string `json:"hostname"` + APIAddress string `json:"api_address"` + ClusterAddress string `json:"cluster_address"` + ActiveNode bool `json:"active_node"` + LastEcho *time.Time `json:"last_echo"` + Version string `json:"version"` + UpgradeVersion string `json:"upgrade_version,omitempty"` + RedundancyZone string `json:"redundancy_zone,omitempty"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_health.go b/vendor/github.com/hashicorp/vault/api/sys_health.go new file mode 100644 index 00000000000..953c1c21eaa --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_health.go @@ -0,0 +1,49 @@ +package api + +import ( + "context" + "net/http" +) + +func (c *Sys) Health() (*HealthResponse, error) { + return 
c.HealthWithContext(context.Background()) +} + +func (c *Sys) HealthWithContext(ctx context.Context) (*HealthResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/health") + // If the code is 400 or above it will automatically turn into an error, + // but the sys/health API defaults to returning 5xx when not sealed or + // inited, so we force this code to be something else so we parse correctly + r.Params.Add("uninitcode", "299") + r.Params.Add("sealedcode", "299") + r.Params.Add("standbycode", "299") + r.Params.Add("drsecondarycode", "299") + r.Params.Add("performancestandbycode", "299") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result HealthResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type HealthResponse struct { + Initialized bool `json:"initialized"` + Sealed bool `json:"sealed"` + Standby bool `json:"standby"` + PerformanceStandby bool `json:"performance_standby"` + ReplicationPerformanceMode string `json:"replication_performance_mode"` + ReplicationDRMode string `json:"replication_dr_mode"` + ServerTimeUTC int64 `json:"server_time_utc"` + Version string `json:"version"` + ClusterName string `json:"cluster_name,omitempty"` + ClusterID string `json:"cluster_id,omitempty"` + LastWAL uint64 `json:"last_wal,omitempty"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_init.go b/vendor/github.com/hashicorp/vault/api/sys_init.go new file mode 100644 index 00000000000..05dea86f6ab --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_init.go @@ -0,0 +1,74 @@ +package api + +import ( + "context" + "net/http" +) + +func (c *Sys) InitStatus() (bool, error) { + return c.InitStatusWithContext(context.Background()) +} + +func (c *Sys) InitStatusWithContext(ctx context.Context) (bool, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/init") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return false, err + } + defer resp.Body.Close() + + var result InitStatusResponse + err = resp.DecodeJSON(&result) + return result.Initialized, err +} + +func (c *Sys) Init(opts *InitRequest) (*InitResponse, error) { + return c.InitWithContext(context.Background(), opts) +} + +func (c *Sys) InitWithContext(ctx context.Context, opts *InitRequest) (*InitResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/init") + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result InitResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type InitRequest struct { + SecretShares int `json:"secret_shares"` + SecretThreshold int `json:"secret_threshold"` + StoredShares int `json:"stored_shares"` + PGPKeys []string `json:"pgp_keys"` + RecoveryShares int `json:"recovery_shares"` + RecoveryThreshold int `json:"recovery_threshold"` + RecoveryPGPKeys []string `json:"recovery_pgp_keys"` + RootTokenPGPKey string `json:"root_token_pgp_key"` +} + +type InitStatusResponse struct { + Initialized bool +} + +type InitResponse struct { + Keys []string `json:"keys"` + KeysB64 []string `json:"keys_base64"` + RecoveryKeys []string `json:"recovery_keys"` + RecoveryKeysB64 
[]string `json:"recovery_keys_base64"` + RootToken string `json:"root_token"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_leader.go b/vendor/github.com/hashicorp/vault/api/sys_leader.go new file mode 100644 index 00000000000..a74e206ebed --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_leader.go @@ -0,0 +1,41 @@ +package api + +import ( + "context" + "net/http" + "time" +) + +func (c *Sys) Leader() (*LeaderResponse, error) { + return c.LeaderWithContext(context.Background()) +} + +func (c *Sys) LeaderWithContext(ctx context.Context) (*LeaderResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/leader") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result LeaderResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type LeaderResponse struct { + HAEnabled bool `json:"ha_enabled"` + IsSelf bool `json:"is_self"` + ActiveTime time.Time `json:"active_time"` + LeaderAddress string `json:"leader_address"` + LeaderClusterAddress string `json:"leader_cluster_address"` + PerfStandby bool `json:"performance_standby"` + PerfStandbyLastRemoteWAL uint64 `json:"performance_standby_last_remote_wal"` + LastWAL uint64 `json:"last_wal"` + RaftCommittedIndex uint64 `json:"raft_committed_index,omitempty"` + RaftAppliedIndex uint64 `json:"raft_applied_index,omitempty"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_leases.go b/vendor/github.com/hashicorp/vault/api/sys_leases.go new file mode 100644 index 00000000000..c02402f5314 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_leases.go @@ -0,0 +1,163 @@ +package api + +import ( + "context" + "errors" + "net/http" +) + +func (c *Sys) Renew(id string, increment int) (*Secret, error) { + return c.RenewWithContext(context.Background(), id, increment) +} + +func (c *Sys) RenewWithContext(ctx context.Context, id string, increment int) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/leases/renew") + + body := map[string]interface{}{ + "increment": increment, + "lease_id": id, + } + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *Sys) Lookup(id string) (*Secret, error) { + return c.LookupWithContext(context.Background(), id) +} + +func (c *Sys) LookupWithContext(ctx context.Context, id string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/leases/lookup") + + body := map[string]interface{}{ + "lease_id": id, + } + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *Sys) Revoke(id string) error { + return c.RevokeWithContext(context.Background(), id) +} + +func (c *Sys) RevokeWithContext(ctx context.Context, id string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/leases/revoke") + body := map[string]interface{}{ + "lease_id": id, + } + if err := r.SetJSONBody(body); err 
!= nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RevokePrefix(id string) error { + return c.RevokePrefixWithContext(context.Background(), id) +} + +func (c *Sys) RevokePrefixWithContext(ctx context.Context, id string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/leases/revoke-prefix/"+id) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RevokeForce(id string) error { + return c.RevokeForceWithContext(context.Background(), id) +} + +func (c *Sys) RevokeForceWithContext(ctx context.Context, id string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/leases/revoke-force/"+id) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RevokeWithOptions(opts *RevokeOptions) error { + return c.RevokeWithOptionsWithContext(context.Background(), opts) +} + +func (c *Sys) RevokeWithOptionsWithContext(ctx context.Context, opts *RevokeOptions) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + if opts == nil { + return errors.New("nil options provided") + } + + // Construct path + path := "/v1/sys/leases/revoke/" + switch { + case opts.Force: + path = "/v1/sys/leases/revoke-force/" + case opts.Prefix: + path = "/v1/sys/leases/revoke-prefix/" + } + path += opts.LeaseID + + r := c.c.NewRequest(http.MethodPut, path) + if !opts.Force { + body := map[string]interface{}{ + "sync": opts.Sync, + } + if err := r.SetJSONBody(body); err != nil { + return err + } + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +type RevokeOptions struct { + LeaseID string + Force bool + Prefix bool + Sync bool +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_mfa.go b/vendor/github.com/hashicorp/vault/api/sys_mfa.go new file mode 100644 index 00000000000..a1ba1bd80f9 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_mfa.go @@ -0,0 +1,45 @@ +package api + +import ( + "context" + "fmt" + "net/http" +) + +func (c *Sys) MFAValidate(requestID string, payload map[string]interface{}) (*Secret, error) { + return c.MFAValidateWithContext(context.Background(), requestID, payload) +} + +func (c *Sys) MFAValidateWithContext(ctx context.Context, requestID string, payload map[string]interface{}) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "mfa_request_id": requestID, + "mfa_payload": payload, + } + + r := c.c.NewRequest(http.MethodPost, fmt.Sprintf("/v1/sys/mfa/validate")) + if err := r.SetJSONBody(body); err != nil { + return nil, fmt.Errorf("failed to set request body: %w", err) + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to parse secret from response: %w", err) + } + + if secret == nil { + return nil, fmt.Errorf("data from server response is empty") + } + + return secret, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_monitor.go 
b/vendor/github.com/hashicorp/vault/api/sys_monitor.go new file mode 100644 index 00000000000..6813799f014 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_monitor.go @@ -0,0 +1,73 @@ +package api + +import ( + "bufio" + "context" + "fmt" + "net/http" + + "github.com/hashicorp/vault/sdk/helper/logging" +) + +// Monitor returns a channel that outputs strings containing the log messages +// coming from the server. +func (c *Sys) Monitor(ctx context.Context, logLevel string, logFormat string) (chan string, error) { + r := c.c.NewRequest(http.MethodGet, "/v1/sys/monitor") + + if logLevel == "" { + r.Params.Add("log_level", "info") + } else { + r.Params.Add("log_level", logLevel) + } + + if logFormat == "" || logFormat == logging.UnspecifiedFormat.String() { + r.Params.Add("log_format", "standard") + } else { + r.Params.Add("log_format", logFormat) + } + + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + + logCh := make(chan string, 64) + + go func() { + scanner := bufio.NewScanner(resp.Body) + droppedCount := 0 + + defer close(logCh) + defer resp.Body.Close() + + for { + if ctx.Err() != nil { + return + } + + if !scanner.Scan() { + return + } + + logMessage := scanner.Text() + + if droppedCount > 0 { + select { + case logCh <- fmt.Sprintf("Monitor dropped %d logs during monitor request\n", droppedCount): + droppedCount = 0 + default: + droppedCount++ + continue + } + } + + select { + case logCh <- logMessage: + default: + droppedCount++ + } + } + }() + + return logCh, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_mounts.go b/vendor/github.com/hashicorp/vault/api/sys_mounts.go new file mode 100644 index 00000000000..75173b4d8f2 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_mounts.go @@ -0,0 +1,320 @@ +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) ListMounts() (map[string]*MountOutput, error) { + return c.ListMountsWithContext(context.Background()) +} + +func (c *Sys) ListMountsWithContext(ctx context.Context) (map[string]*MountOutput, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/mounts") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + mounts := map[string]*MountOutput{} + err = mapstructure.Decode(secret.Data, &mounts) + if err != nil { + return nil, err + } + + return mounts, nil +} + +func (c *Sys) Mount(path string, mountInfo *MountInput) error { + return c.MountWithContext(context.Background(), path, mountInfo) +} + +func (c *Sys) MountWithContext(ctx context.Context, path string, mountInfo *MountInput) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, fmt.Sprintf("/v1/sys/mounts/%s", path)) + if err := r.SetJSONBody(mountInfo); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) Unmount(path string) error { + return c.UnmountWithContext(context.Background(), path) +} + +func (c *Sys) UnmountWithContext(ctx context.Context, 
path string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, fmt.Sprintf("/v1/sys/mounts/%s", path)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +// Remount wraps RemountWithContext using context.Background. +func (c *Sys) Remount(from, to string) error { + return c.RemountWithContext(context.Background(), from, to) +} + +// RemountWithContext kicks off a remount operation, polls the status endpoint using +// the migration ID till either success or failure state is observed +func (c *Sys) RemountWithContext(ctx context.Context, from, to string) error { + remountResp, err := c.StartRemountWithContext(ctx, from, to) + if err != nil { + return err + } + + for { + remountStatusResp, err := c.RemountStatusWithContext(ctx, remountResp.MigrationID) + if err != nil { + return err + } + if remountStatusResp.MigrationInfo.MigrationStatus == "success" { + return nil + } + if remountStatusResp.MigrationInfo.MigrationStatus == "failure" { + return fmt.Errorf("Failure! Error encountered moving mount %s to %s, with migration ID %s", from, to, remountResp.MigrationID) + } + time.Sleep(1 * time.Second) + } +} + +// StartRemount wraps StartRemountWithContext using context.Background. +func (c *Sys) StartRemount(from, to string) (*MountMigrationOutput, error) { + return c.StartRemountWithContext(context.Background(), from, to) +} + +// StartRemountWithContext kicks off a mount migration and returns a response with the migration ID +func (c *Sys) StartRemountWithContext(ctx context.Context, from, to string) (*MountMigrationOutput, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "from": from, + "to": to, + } + + r := c.c.NewRequest(http.MethodPost, "/v1/sys/remount") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result MountMigrationOutput + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +// RemountStatus wraps RemountStatusWithContext using context.Background. 
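+//
+// A sketch of driving the poll loop by hand instead of calling Remount, with
+// placeholder mount paths and a configured client assumed:
+//
+//	out, err := client.Sys().StartRemount("secret", "secret-moved")
+//	if err != nil {
+//		return err
+//	}
+//	st, err := client.Sys().RemountStatus(out.MigrationID)
+//	if err != nil {
+//		return err
+//	}
+//	// st.MigrationInfo.MigrationStatus reports "success" or "failure" when done.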
+func (c *Sys) RemountStatus(migrationID string) (*MountMigrationStatusOutput, error) { + return c.RemountStatusWithContext(context.Background(), migrationID) +} + +// RemountStatusWithContext checks the status of a mount migration operation with the provided ID +func (c *Sys) RemountStatusWithContext(ctx context.Context, migrationID string) (*MountMigrationStatusOutput, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, fmt.Sprintf("/v1/sys/remount/status/%s", migrationID)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result MountMigrationStatusOutput + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +func (c *Sys) TuneMount(path string, config MountConfigInput) error { + return c.TuneMountWithContext(context.Background(), path, config) +} + +func (c *Sys) TuneMountWithContext(ctx context.Context, path string, config MountConfigInput) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, fmt.Sprintf("/v1/sys/mounts/%s/tune", path)) + if err := r.SetJSONBody(config); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) MountConfig(path string) (*MountConfigOutput, error) { + return c.MountConfigWithContext(context.Background(), path) +} + +func (c *Sys) MountConfigWithContext(ctx context.Context, path string) (*MountConfigOutput, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, fmt.Sprintf("/v1/sys/mounts/%s/tune", path)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result MountConfigOutput + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +type MountInput struct { + Type string `json:"type"` + Description string `json:"description"` + Config MountConfigInput `json:"config"` + Local bool `json:"local"` + SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"` + ExternalEntropyAccess bool `json:"external_entropy_access" mapstructure:"external_entropy_access"` + Options map[string]string `json:"options"` + + // Deprecated: Newer server responses should be returning this information in the + // Type field (json: "type") instead. 
+ PluginName string `json:"plugin_name,omitempty"` +} + +type MountConfigInput struct { + Options map[string]string `json:"options" mapstructure:"options"` + DefaultLeaseTTL string `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` + Description *string `json:"description,omitempty" mapstructure:"description"` + MaxLeaseTTL string `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` + ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"` + AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` + AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` + ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` + PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"` + AllowedResponseHeaders []string `json:"allowed_response_headers,omitempty" mapstructure:"allowed_response_headers"` + TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` + AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"` + PluginVersion string `json:"plugin_version,omitempty"` + + // Deprecated: This field will always be blank for newer server responses. + PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` +} + +type MountOutput struct { + UUID string `json:"uuid"` + Type string `json:"type"` + Description string `json:"description"` + Accessor string `json:"accessor"` + Config MountConfigOutput `json:"config"` + Options map[string]string `json:"options"` + Local bool `json:"local"` + SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"` + ExternalEntropyAccess bool `json:"external_entropy_access" mapstructure:"external_entropy_access"` + PluginVersion string `json:"plugin_version" mapstructure:"plugin_version"` + RunningVersion string `json:"running_plugin_version" mapstructure:"running_plugin_version"` + RunningSha256 string `json:"running_sha256" mapstructure:"running_sha256"` + DeprecationStatus string `json:"deprecation_status" mapstructure:"deprecation_status"` +} + +type MountConfigOutput struct { + DefaultLeaseTTL int `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` + MaxLeaseTTL int `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` + ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"` + AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` + AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` + ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` + PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"` + AllowedResponseHeaders []string `json:"allowed_response_headers,omitempty" mapstructure:"allowed_response_headers"` + TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` + AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"` + + // Deprecated: This field will always be blank for newer server responses. 
+ PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` +} + +type MountMigrationOutput struct { + MigrationID string `mapstructure:"migration_id"` +} + +type MountMigrationStatusOutput struct { + MigrationID string `mapstructure:"migration_id"` + MigrationInfo *MountMigrationStatusInfo `mapstructure:"migration_info"` +} + +type MountMigrationStatusInfo struct { + SourceMount string `mapstructure:"source_mount"` + TargetMount string `mapstructure:"target_mount"` + MigrationStatus string `mapstructure:"status"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_plugins.go b/vendor/github.com/hashicorp/vault/api/sys_plugins.go new file mode 100644 index 00000000000..989c78f1d5b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_plugins.go @@ -0,0 +1,380 @@ +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/mitchellh/mapstructure" +) + +// ListPluginsInput is used as input to the ListPlugins function. +type ListPluginsInput struct { + // Type of the plugin. Required. + Type consts.PluginType `json:"type"` +} + +// ListPluginsResponse is the response from the ListPlugins call. +type ListPluginsResponse struct { + // PluginsByType is the list of plugins by type. + PluginsByType map[consts.PluginType][]string `json:"types"` + + Details []PluginDetails `json:"details,omitempty"` + + // Names is the list of names of the plugins. + // + // Deprecated: Newer server responses should be returning PluginsByType (json: + // "types") instead. + Names []string `json:"names"` +} + +type PluginDetails struct { + Type string `json:"type"` + Name string `json:"name"` + Version string `json:"version,omitempty"` + Builtin bool `json:"builtin"` + DeprecationStatus string `json:"deprecation_status,omitempty" mapstructure:"deprecation_status"` +} + +// ListPlugins wraps ListPluginsWithContext using context.Background. +func (c *Sys) ListPlugins(i *ListPluginsInput) (*ListPluginsResponse, error) { + return c.ListPluginsWithContext(context.Background(), i) +} + +// ListPluginsWithContext lists all plugins in the catalog and returns their names as a +// list of strings. 
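+//
+// For example (a sketch; assumes a configured client and a caller-side import of
+// github.com/hashicorp/vault/sdk/helper/consts):
+//
+//	resp, err := client.Sys().ListPlugins(&api.ListPluginsInput{Type: consts.PluginTypeSecrets})
+//	if err != nil {
+//		return err
+//	}
+//	names := resp.PluginsByType[consts.PluginTypeSecrets]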
+func (c *Sys) ListPluginsWithContext(ctx context.Context, i *ListPluginsInput) (*ListPluginsResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + resp, err := c.c.rawRequestWithContext(ctx, c.c.NewRequest(http.MethodGet, "/v1/sys/plugins/catalog")) + if err != nil && resp == nil { + return nil, err + } + if resp == nil { + return nil, nil + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + result := &ListPluginsResponse{ + PluginsByType: make(map[consts.PluginType][]string), + } + switch i.Type { + case consts.PluginTypeUnknown: + for _, pluginType := range consts.PluginTypes { + pluginsRaw, ok := secret.Data[pluginType.String()] + if !ok { + continue + } + + pluginsIfc, ok := pluginsRaw.([]interface{}) + if !ok { + return nil, fmt.Errorf("unable to parse plugins for %q type", pluginType.String()) + } + + plugins := make([]string, 0, len(pluginsIfc)) + for _, nameIfc := range pluginsIfc { + name, ok := nameIfc.(string) + if !ok { + continue + } + plugins = append(plugins, name) + } + result.PluginsByType[pluginType] = plugins + } + default: + pluginsRaw, ok := secret.Data[i.Type.String()] + if !ok { + return nil, fmt.Errorf("no %s entry in returned data", i.Type.String()) + } + + var respKeys []string + if err := mapstructure.Decode(pluginsRaw, &respKeys); err != nil { + return nil, err + } + result.PluginsByType[i.Type] = respKeys + } + + if detailed, ok := secret.Data["detailed"]; ok { + var details []PluginDetails + if err := mapstructure.Decode(detailed, &details); err != nil { + return nil, err + } + + switch i.Type { + case consts.PluginTypeUnknown: + result.Details = details + default: + // Filter for just the queried type. + for _, entry := range details { + if entry.Type == i.Type.String() { + result.Details = append(result.Details, entry) + } + } + } + } + + return result, nil +} + +// GetPluginInput is used as input to the GetPlugin function. +type GetPluginInput struct { + Name string `json:"-"` + + // Type of the plugin. Required. + Type consts.PluginType `json:"type"` + Version string `json:"version"` +} + +// GetPluginResponse is the response from the GetPlugin call. +type GetPluginResponse struct { + Args []string `json:"args"` + Builtin bool `json:"builtin"` + Command string `json:"command"` + Name string `json:"name"` + SHA256 string `json:"sha256"` + DeprecationStatus string `json:"deprecation_status,omitempty"` + Version string `json:"version,omitempty"` +} + +// GetPlugin wraps GetPluginWithContext using context.Background. +func (c *Sys) GetPlugin(i *GetPluginInput) (*GetPluginResponse, error) { + return c.GetPluginWithContext(context.Background(), i) +} + +// GetPluginWithContext retrieves information about the plugin. 
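+//
+// For example (a sketch with a placeholder plugin name):
+//
+//	info, err := client.Sys().GetPlugin(&api.GetPluginInput{
+//		Name: "my-secrets-plugin",
+//		Type: consts.PluginTypeSecrets,
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(info.SHA256, info.Version)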
+func (c *Sys) GetPluginWithContext(ctx context.Context, i *GetPluginInput) (*GetPluginResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + path := catalogPathByType(i.Type, i.Name) + req := c.c.NewRequest(http.MethodGet, path) + if i.Version != "" { + req.Params.Set("version", i.Version) + } + + resp, err := c.c.rawRequestWithContext(ctx, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result struct { + Data *GetPluginResponse + } + err = resp.DecodeJSON(&result) + if err != nil { + return nil, err + } + return result.Data, err +} + +// RegisterPluginInput is used as input to the RegisterPlugin function. +type RegisterPluginInput struct { + // Name is the name of the plugin. Required. + Name string `json:"-"` + + // Type of the plugin. Required. + Type consts.PluginType `json:"type"` + + // Args is the list of args to spawn the process with. + Args []string `json:"args,omitempty"` + + // Command is the command to run. + Command string `json:"command,omitempty"` + + // SHA256 is the shasum of the plugin. + SHA256 string `json:"sha256,omitempty"` + + // Version is the optional version of the plugin being registered + Version string `json:"version,omitempty"` +} + +// RegisterPlugin wraps RegisterPluginWithContext using context.Background. +func (c *Sys) RegisterPlugin(i *RegisterPluginInput) error { + return c.RegisterPluginWithContext(context.Background(), i) +} + +// RegisterPluginWithContext registers the plugin with the given information. +func (c *Sys) RegisterPluginWithContext(ctx context.Context, i *RegisterPluginInput) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + path := catalogPathByType(i.Type, i.Name) + req := c.c.NewRequest(http.MethodPut, path) + + if err := req.SetJSONBody(i); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, req) + if err == nil { + defer resp.Body.Close() + } + return err +} + +// DeregisterPluginInput is used as input to the DeregisterPlugin function. +type DeregisterPluginInput struct { + // Name is the name of the plugin. Required. + Name string `json:"-"` + + // Type of the plugin. Required. + Type consts.PluginType `json:"type"` + + // Version of the plugin. Optional. + Version string `json:"version,omitempty"` +} + +// DeregisterPlugin wraps DeregisterPluginWithContext using context.Background. +func (c *Sys) DeregisterPlugin(i *DeregisterPluginInput) error { + return c.DeregisterPluginWithContext(context.Background(), i) +} + +// DeregisterPluginWithContext removes the plugin with the given name from the plugin +// catalog. +func (c *Sys) DeregisterPluginWithContext(ctx context.Context, i *DeregisterPluginInput) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + path := catalogPathByType(i.Type, i.Name) + req := c.c.NewRequest(http.MethodDelete, path) + req.Params.Set("version", i.Version) + resp, err := c.c.rawRequestWithContext(ctx, req) + if err == nil { + defer resp.Body.Close() + } + return err +} + +// ReloadPluginInput is used as input to the ReloadPlugin function. 
+type ReloadPluginInput struct { + // Plugin is the name of the plugin to reload, as registered in the plugin catalog + Plugin string `json:"plugin"` + + // Mounts is the array of string mount paths of the plugin backends to reload + Mounts []string `json:"mounts"` + + // Scope is the scope of the plugin reload + Scope string `json:"scope"` +} + +// ReloadPlugin wraps ReloadPluginWithContext using context.Background. +func (c *Sys) ReloadPlugin(i *ReloadPluginInput) (string, error) { + return c.ReloadPluginWithContext(context.Background(), i) +} + +// ReloadPluginWithContext reloads mounted plugin backends, possibly returning +// reloadId for a cluster scoped reload +func (c *Sys) ReloadPluginWithContext(ctx context.Context, i *ReloadPluginInput) (string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + path := "/v1/sys/plugins/reload/backend" + req := c.c.NewRequest(http.MethodPut, path) + + if err := req.SetJSONBody(i); err != nil { + return "", err + } + + resp, err := c.c.rawRequestWithContext(ctx, req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if i.Scope == "global" { + // Get the reload id + secret, parseErr := ParseSecret(resp.Body) + if parseErr != nil { + return "", parseErr + } + if _, ok := secret.Data["reload_id"]; ok { + return secret.Data["reload_id"].(string), nil + } + } + return "", err +} + +// ReloadStatus is the status of an individual node's plugin reload +type ReloadStatus struct { + Timestamp time.Time `json:"timestamp" mapstructure:"timestamp"` + Error string `json:"error" mapstructure:"error"` +} + +// ReloadStatusResponse is the combined response of all known completed plugin reloads +type ReloadStatusResponse struct { + ReloadID string `mapstructure:"reload_id"` + Results map[string]*ReloadStatus `mapstructure:"results"` +} + +// ReloadPluginStatusInput is used as input to the ReloadStatusPlugin function. +type ReloadPluginStatusInput struct { + // ReloadID is the ID of the reload operation + ReloadID string `json:"reload_id"` +} + +// ReloadPluginStatus wraps ReloadPluginStatusWithContext using context.Background. 
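+//
+// A sketch tying the reload and status calls together for a cluster-wide
+// ("global") reload; the plugin name is a placeholder:
+//
+//	reloadID, err := client.Sys().ReloadPlugin(&api.ReloadPluginInput{
+//		Plugin: "my-secrets-plugin",
+//		Scope:  "global",
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	st, err := client.Sys().ReloadPluginStatus(&api.ReloadPluginStatusInput{ReloadID: reloadID})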
+func (c *Sys) ReloadPluginStatus(reloadStatusInput *ReloadPluginStatusInput) (*ReloadStatusResponse, error) { + return c.ReloadPluginStatusWithContext(context.Background(), reloadStatusInput) +} + +// ReloadPluginStatusWithContext retrieves the status of a reload operation +func (c *Sys) ReloadPluginStatusWithContext(ctx context.Context, reloadStatusInput *ReloadPluginStatusInput) (*ReloadStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + path := "/v1/sys/plugins/reload/backend/status" + req := c.c.NewRequest(http.MethodGet, path) + req.Params.Add("reload_id", reloadStatusInput.ReloadID) + + resp, err := c.c.rawRequestWithContext(ctx, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp != nil { + secret, parseErr := ParseSecret(resp.Body) + if parseErr != nil { + return nil, parseErr + } + + var r ReloadStatusResponse + d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.StringToTimeHookFunc(time.RFC3339), + Result: &r, + }) + if err != nil { + return nil, err + } + err = d.Decode(secret.Data) + if err != nil { + return nil, err + } + return &r, nil + } + return nil, nil +} + +// catalogPathByType is a helper to construct the proper API path by plugin type +func catalogPathByType(pluginType consts.PluginType, name string) string { + path := fmt.Sprintf("/v1/sys/plugins/catalog/%s/%s", pluginType, name) + + // Backwards compat, if type is not provided then use old path + if pluginType == consts.PluginTypeUnknown { + path = fmt.Sprintf("/v1/sys/plugins/catalog/%s", name) + } + + return path +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_policy.go b/vendor/github.com/hashicorp/vault/api/sys_policy.go new file mode 100644 index 00000000000..4a4f91b08c7 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_policy.go @@ -0,0 +1,134 @@ +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) ListPolicies() ([]string, error) { + return c.ListPoliciesWithContext(context.Background()) +} + +func (c *Sys) ListPoliciesWithContext(ctx context.Context) ([]string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest("LIST", "/v1/sys/policies/acl") + // Set this for broader compatibility, but we use LIST above to be able to + // handle the wrapping lookup function + r.Method = http.MethodGet + r.Params.Set("list", "true") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result []string + err = mapstructure.Decode(secret.Data["keys"], &result) + if err != nil { + return nil, err + } + + return result, err +} + +func (c *Sys) GetPolicy(name string) (string, error) { + return c.GetPolicyWithContext(context.Background(), name) +} + +func (c *Sys) GetPolicyWithContext(ctx context.Context, name string) (string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, fmt.Sprintf("/v1/sys/policies/acl/%s", name)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + if resp.StatusCode == 404 { + return "", nil + } + } + if err != nil { + return "", err + } + 
+ secret, err := ParseSecret(resp.Body) + if err != nil { + return "", err + } + if secret == nil || secret.Data == nil { + return "", errors.New("data from server response is empty") + } + + if policyRaw, ok := secret.Data["policy"]; ok { + return policyRaw.(string), nil + } + + return "", fmt.Errorf("no policy found in response") +} + +func (c *Sys) PutPolicy(name, rules string) error { + return c.PutPolicyWithContext(context.Background(), name, rules) +} + +func (c *Sys) PutPolicyWithContext(ctx context.Context, name, rules string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]string{ + "policy": rules, + } + + r := c.c.NewRequest(http.MethodPut, fmt.Sprintf("/v1/sys/policies/acl/%s", name)) + if err := r.SetJSONBody(body); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) DeletePolicy(name string) error { + return c.DeletePolicyWithContext(context.Background(), name) +} + +func (c *Sys) DeletePolicyWithContext(ctx context.Context, name string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, fmt.Sprintf("/v1/sys/policies/acl/%s", name)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +type getPoliciesResp struct { + Rules string `json:"rules"` +} + +type listPoliciesResp struct { + Policies []string `json:"policies"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_raft.go b/vendor/github.com/hashicorp/vault/api/sys_raft.go new file mode 100644 index 00000000000..7806a1418df --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_raft.go @@ -0,0 +1,382 @@ +package api + +import ( + "archive/tar" + "compress/gzip" + "context" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net/http" + "sync" + "time" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/mitchellh/mapstructure" +) + +var ErrIncompleteSnapshot = errors.New("incomplete snapshot, unable to read SHA256SUMS.sealed file") + +// RaftJoinResponse represents the response of the raft join API +type RaftJoinResponse struct { + Joined bool `json:"joined"` +} + +// RaftJoinRequest represents the parameters consumed by the raft join API +type RaftJoinRequest struct { + AutoJoin string `json:"auto_join"` + AutoJoinScheme string `json:"auto_join_scheme"` + AutoJoinPort uint `json:"auto_join_port"` + LeaderAPIAddr string `json:"leader_api_addr"` + LeaderCACert string `json:"leader_ca_cert"` + LeaderClientCert string `json:"leader_client_cert"` + LeaderClientKey string `json:"leader_client_key"` + Retry bool `json:"retry"` + NonVoter bool `json:"non_voter"` +} + +// AutopilotConfig is used for querying/setting the Autopilot configuration. 
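+//
+// The three time.Duration fields are serialized by the custom MarshalJSON and
+// UnmarshalJSON methods below (as strings such as "10s") rather than by struct
+// tags. A write sketch with illustrative values:
+//
+//	err := client.Sys().PutRaftAutopilotConfiguration(&api.AutopilotConfig{
+//		CleanupDeadServers:   true,
+//		LastContactThreshold: 10 * time.Second,
+//		MinQuorum:            3,
+//	})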
+type AutopilotConfig struct { + CleanupDeadServers bool `json:"cleanup_dead_servers" mapstructure:"cleanup_dead_servers"` + LastContactThreshold time.Duration `json:"last_contact_threshold" mapstructure:"-"` + DeadServerLastContactThreshold time.Duration `json:"dead_server_last_contact_threshold" mapstructure:"-"` + MaxTrailingLogs uint64 `json:"max_trailing_logs" mapstructure:"max_trailing_logs"` + MinQuorum uint `json:"min_quorum" mapstructure:"min_quorum"` + ServerStabilizationTime time.Duration `json:"server_stabilization_time" mapstructure:"-"` + DisableUpgradeMigration bool `json:"disable_upgrade_migration" mapstructure:"disable_upgrade_migration"` +} + +// MarshalJSON makes the autopilot config fields JSON compatible +func (ac *AutopilotConfig) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "cleanup_dead_servers": ac.CleanupDeadServers, + "last_contact_threshold": ac.LastContactThreshold.String(), + "dead_server_last_contact_threshold": ac.DeadServerLastContactThreshold.String(), + "max_trailing_logs": ac.MaxTrailingLogs, + "min_quorum": ac.MinQuorum, + "server_stabilization_time": ac.ServerStabilizationTime.String(), + "disable_upgrade_migration": ac.DisableUpgradeMigration, + }) +} + +// UnmarshalJSON parses the autopilot config JSON blob +func (ac *AutopilotConfig) UnmarshalJSON(b []byte) error { + var data interface{} + err := json.Unmarshal(b, &data) + if err != nil { + return err + } + + conf := data.(map[string]interface{}) + if err = mapstructure.WeakDecode(conf, ac); err != nil { + return err + } + if ac.LastContactThreshold, err = parseutil.ParseDurationSecond(conf["last_contact_threshold"]); err != nil { + return err + } + if ac.DeadServerLastContactThreshold, err = parseutil.ParseDurationSecond(conf["dead_server_last_contact_threshold"]); err != nil { + return err + } + if ac.ServerStabilizationTime, err = parseutil.ParseDurationSecond(conf["server_stabilization_time"]); err != nil { + return err + } + return nil +} + +// AutopilotState represents the response of the raft autopilot state API +type AutopilotState struct { + Healthy bool `mapstructure:"healthy"` + FailureTolerance int `mapstructure:"failure_tolerance"` + Servers map[string]*AutopilotServer `mapstructure:"servers"` + Leader string `mapstructure:"leader"` + Voters []string `mapstructure:"voters"` + NonVoters []string `mapstructure:"non_voters"` + RedundancyZones map[string]AutopilotZone `mapstructure:"redundancy_zones,omitempty"` + Upgrade *AutopilotUpgrade `mapstructure:"upgrade_info,omitempty"` + OptimisticFailureTolerance int `mapstructure:"optimistic_failure_tolerance,omitempty"` +} + +// AutopilotServer represents the server blocks in the response of the raft +// autopilot state API. 
+type AutopilotServer struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + Address string `mapstructure:"address"` + NodeStatus string `mapstructure:"node_status"` + LastContact string `mapstructure:"last_contact"` + LastTerm uint64 `mapstructure:"last_term"` + LastIndex uint64 `mapstructure:"last_index"` + Healthy bool `mapstructure:"healthy"` + StableSince string `mapstructure:"stable_since"` + Status string `mapstructure:"status"` + Version string `mapstructure:"version"` + UpgradeVersion string `mapstructure:"upgrade_version,omitempty"` + RedundancyZone string `mapstructure:"redundancy_zone,omitempty"` + NodeType string `mapstructure:"node_type,omitempty"` +} + +type AutopilotZone struct { + Servers []string `mapstructure:"servers,omitempty"` + Voters []string `mapstructure:"voters,omitempty"` + FailureTolerance int `mapstructure:"failure_tolerance,omitempty"` +} + +type AutopilotUpgrade struct { + Status string `mapstructure:"status"` + TargetVersion string `mapstructure:"target_version,omitempty"` + TargetVersionVoters []string `mapstructure:"target_version_voters,omitempty"` + TargetVersionNonVoters []string `mapstructure:"target_version_non_voters,omitempty"` + TargetVersionReadReplicas []string `mapstructure:"target_version_read_replicas,omitempty"` + OtherVersionVoters []string `mapstructure:"other_version_voters,omitempty"` + OtherVersionNonVoters []string `mapstructure:"other_version_non_voters,omitempty"` + OtherVersionReadReplicas []string `mapstructure:"other_version_read_replicas,omitempty"` + RedundancyZones map[string]AutopilotZoneUpgradeVersions `mapstructure:"redundancy_zones,omitempty"` +} + +type AutopilotZoneUpgradeVersions struct { + TargetVersionVoters []string `mapstructure:"target_version_voters,omitempty"` + TargetVersionNonVoters []string `mapstructure:"target_version_non_voters,omitempty"` + OtherVersionVoters []string `mapstructure:"other_version_voters,omitempty"` + OtherVersionNonVoters []string `mapstructure:"other_version_non_voters,omitempty"` +} + +// RaftJoin wraps RaftJoinWithContext using context.Background. +func (c *Sys) RaftJoin(opts *RaftJoinRequest) (*RaftJoinResponse, error) { + return c.RaftJoinWithContext(context.Background(), opts) +} + +// RaftJoinWithContext adds the node from which this call is invoked from to the raft +// cluster represented by the leader address in the parameter. +func (c *Sys) RaftJoinWithContext(ctx context.Context, opts *RaftJoinRequest) (*RaftJoinResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/sys/storage/raft/join") + + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RaftJoinResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +// RaftSnapshot wraps RaftSnapshotWithContext using context.Background. +func (c *Sys) RaftSnapshot(snapWriter io.Writer) error { + return c.RaftSnapshotWithContext(context.Background(), snapWriter) +} + +// RaftSnapshotWithContext invokes the API that takes the snapshot of the raft cluster and +// writes it to the supplied io.Writer. 
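+//
+// For example, streaming a snapshot to a local file (a sketch; the path is a
+// placeholder):
+//
+//	f, err := os.Create("vault-raft.snap")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	if err := client.Sys().RaftSnapshot(f); err != nil {
+//		return err
+//	}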
+func (c *Sys) RaftSnapshotWithContext(ctx context.Context, snapWriter io.Writer) error { + r := c.c.NewRequest(http.MethodGet, "/v1/sys/storage/raft/snapshot") + r.URL.RawQuery = r.Params.Encode() + + resp, err := c.c.httpRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + // Make sure that the last file in the archive, SHA256SUMS.sealed, is present + // and non-empty. This is to catch cases where the snapshot failed midstream, + // e.g. due to a problem with the seal that prevented encryption of that file. + var wg sync.WaitGroup + wg.Add(1) + var verified bool + + rPipe, wPipe := io.Pipe() + dup := io.TeeReader(resp.Body, wPipe) + go func() { + defer func() { + io.Copy(ioutil.Discard, rPipe) + rPipe.Close() + wg.Done() + }() + + uncompressed, err := gzip.NewReader(rPipe) + if err != nil { + return + } + + t := tar.NewReader(uncompressed) + var h *tar.Header + for { + h, err = t.Next() + if err != nil { + return + } + if h.Name != "SHA256SUMS.sealed" { + continue + } + var b []byte + b, err = ioutil.ReadAll(t) + if err != nil || len(b) == 0 { + return + } + verified = true + return + } + }() + + // Copy bytes from dup to snapWriter. This will have a side effect that + // everything read from dup will be written to wPipe. + _, err = io.Copy(snapWriter, dup) + wPipe.Close() + if err != nil { + rPipe.CloseWithError(err) + return err + } + wg.Wait() + + if !verified { + return ErrIncompleteSnapshot + } + return nil +} + +// RaftSnapshotRestore wraps RaftSnapshotRestoreWithContext using context.Background. +func (c *Sys) RaftSnapshotRestore(snapReader io.Reader, force bool) error { + return c.RaftSnapshotRestoreWithContext(context.Background(), snapReader, force) +} + +// RaftSnapshotRestoreWithContext reads the snapshot from the io.Reader and installs that +// snapshot, returning the cluster to the state defined by it. +func (c *Sys) RaftSnapshotRestoreWithContext(ctx context.Context, snapReader io.Reader, force bool) error { + path := "/v1/sys/storage/raft/snapshot" + if force { + path = "/v1/sys/storage/raft/snapshot-force" + } + + r := c.c.NewRequest(http.MethodPost, path) + r.Body = snapReader + + resp, err := c.c.httpRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// RaftAutopilotState wraps RaftAutopilotStateWithContext using context.Background. +func (c *Sys) RaftAutopilotState() (*AutopilotState, error) { + return c.RaftAutopilotStateWithContext(context.Background()) +} + +// RaftAutopilotStateWithContext returns the state of the raft cluster as seen by autopilot. +func (c *Sys) RaftAutopilotStateWithContext(ctx context.Context) (*AutopilotState, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/storage/raft/autopilot/state") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + if resp.StatusCode == 404 { + return nil, nil + } + } + if err != nil { + return nil, err + } + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result AutopilotState + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +// RaftAutopilotConfiguration wraps RaftAutopilotConfigurationWithContext using context.Background. 
+func (c *Sys) RaftAutopilotConfiguration() (*AutopilotConfig, error) { + return c.RaftAutopilotConfigurationWithContext(context.Background()) +} + +// RaftAutopilotConfigurationWithContext fetches the autopilot config. +func (c *Sys) RaftAutopilotConfigurationWithContext(ctx context.Context) (*AutopilotConfig, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/storage/raft/autopilot/configuration") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + if resp.StatusCode == 404 { + return nil, nil + } + } + if err != nil { + return nil, err + } + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil { + return nil, errors.New("data from server response is empty") + } + + var result AutopilotConfig + if err = mapstructure.Decode(secret.Data, &result); err != nil { + return nil, err + } + if result.LastContactThreshold, err = parseutil.ParseDurationSecond(secret.Data["last_contact_threshold"]); err != nil { + return nil, err + } + if result.DeadServerLastContactThreshold, err = parseutil.ParseDurationSecond(secret.Data["dead_server_last_contact_threshold"]); err != nil { + return nil, err + } + if result.ServerStabilizationTime, err = parseutil.ParseDurationSecond(secret.Data["server_stabilization_time"]); err != nil { + return nil, err + } + + return &result, err +} + +// PutRaftAutopilotConfiguration wraps PutRaftAutopilotConfigurationWithContext using context.Background. +func (c *Sys) PutRaftAutopilotConfiguration(opts *AutopilotConfig) error { + return c.PutRaftAutopilotConfigurationWithContext(context.Background(), opts) +} + +// PutRaftAutopilotConfigurationWithContext allows modifying the raft autopilot configuration +func (c *Sys) PutRaftAutopilotConfigurationWithContext(ctx context.Context, opts *AutopilotConfig) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/sys/storage/raft/autopilot/configuration") + + if err := r.SetJSONBody(opts); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_rekey.go b/vendor/github.com/hashicorp/vault/api/sys_rekey.go new file mode 100644 index 00000000000..2ac8a4743bc --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_rekey.go @@ -0,0 +1,479 @@ +package api + +import ( + "context" + "errors" + "net/http" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) RekeyStatus() (*RekeyStatusResponse, error) { + return c.RekeyStatusWithContext(context.Background()) +} + +func (c *Sys) RekeyStatusWithContext(ctx context.Context) (*RekeyStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/rekey/init") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyStatus() (*RekeyStatusResponse, error) { + return c.RekeyRecoveryKeyStatusWithContext(context.Background()) +} + +func (c *Sys) RekeyRecoveryKeyStatusWithContext(ctx context.Context) (*RekeyStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer 
cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/rekey-recovery-key/init") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyVerificationStatus() (*RekeyVerificationStatusResponse, error) { + return c.RekeyVerificationStatusWithContext(context.Background()) +} + +func (c *Sys) RekeyVerificationStatusWithContext(ctx context.Context) (*RekeyVerificationStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/rekey/verify") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyVerificationStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyVerificationStatus() (*RekeyVerificationStatusResponse, error) { + return c.RekeyRecoveryKeyVerificationStatusWithContext(context.Background()) +} + +func (c *Sys) RekeyRecoveryKeyVerificationStatusWithContext(ctx context.Context) (*RekeyVerificationStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/rekey-recovery-key/verify") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyVerificationStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) { + return c.RekeyInitWithContext(context.Background(), config) +} + +func (c *Sys) RekeyInitWithContext(ctx context.Context, config *RekeyInitRequest) (*RekeyStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey/init") + if err := r.SetJSONBody(config); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) { + return c.RekeyRecoveryKeyInitWithContext(context.Background(), config) +} + +func (c *Sys) RekeyRecoveryKeyInitWithContext(ctx context.Context, config *RekeyInitRequest) (*RekeyStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey-recovery-key/init") + if err := r.SetJSONBody(config); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyCancel() error { + return c.RekeyCancelWithContext(context.Background()) +} + +func (c *Sys) RekeyCancelWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey/init") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyRecoveryKeyCancel() error { + return 
c.RekeyRecoveryKeyCancelWithContext(context.Background()) +} + +func (c *Sys) RekeyRecoveryKeyCancelWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey-recovery-key/init") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyVerificationCancel() error { + return c.RekeyVerificationCancelWithContext(context.Background()) +} + +func (c *Sys) RekeyVerificationCancelWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey/verify") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyRecoveryKeyVerificationCancel() error { + return c.RekeyRecoveryKeyVerificationCancelWithContext(context.Background()) +} + +func (c *Sys) RekeyRecoveryKeyVerificationCancelWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey-recovery-key/verify") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) { + return c.RekeyUpdateWithContext(context.Background(), shard, nonce) +} + +func (c *Sys) RekeyUpdateWithContext(ctx context.Context, shard, nonce string) (*RekeyUpdateResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey/update") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) { + return c.RekeyRecoveryKeyUpdateWithContext(context.Background(), shard, nonce) +} + +func (c *Sys) RekeyRecoveryKeyUpdateWithContext(ctx context.Context, shard, nonce string) (*RekeyUpdateResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey-recovery-key/update") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRetrieveBackup() (*RekeyRetrieveResponse, error) { + return c.RekeyRetrieveBackupWithContext(context.Background()) +} + +func (c *Sys) RekeyRetrieveBackupWithContext(ctx context.Context) (*RekeyRetrieveResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/rekey/backup") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + 
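+		// An empty response body parses to a nil secret rather than an error,
+		// so both cases are treated as a missing backup payload.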
return nil, errors.New("data from server response is empty") + } + + var result RekeyRetrieveResponse + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +func (c *Sys) RekeyRetrieveRecoveryBackup() (*RekeyRetrieveResponse, error) { + return c.RekeyRetrieveRecoveryBackupWithContext(context.Background()) +} + +func (c *Sys) RekeyRetrieveRecoveryBackupWithContext(ctx context.Context) (*RekeyRetrieveResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/rekey/recovery-key-backup") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result RekeyRetrieveResponse + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +func (c *Sys) RekeyDeleteBackup() error { + return c.RekeyDeleteBackupWithContext(context.Background()) +} + +func (c *Sys) RekeyDeleteBackupWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey/backup") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + + return err +} + +func (c *Sys) RekeyDeleteRecoveryBackup() error { + return c.RekeyDeleteRecoveryBackupWithContext(context.Background()) +} + +func (c *Sys) RekeyDeleteRecoveryBackupWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey/recovery-key-backup") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + + return err +} + +func (c *Sys) RekeyVerificationUpdate(shard, nonce string) (*RekeyVerificationUpdateResponse, error) { + return c.RekeyVerificationUpdateWithContext(context.Background(), shard, nonce) +} + +func (c *Sys) RekeyVerificationUpdateWithContext(ctx context.Context, shard, nonce string) (*RekeyVerificationUpdateResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey/verify") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyVerificationUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyVerificationUpdate(shard, nonce string) (*RekeyVerificationUpdateResponse, error) { + return c.RekeyRecoveryKeyVerificationUpdateWithContext(context.Background(), shard, nonce) +} + +func (c *Sys) RekeyRecoveryKeyVerificationUpdateWithContext(ctx context.Context, shard, nonce string) (*RekeyVerificationUpdateResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey-recovery-key/verify") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) 
+ if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyVerificationUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type RekeyInitRequest struct { + SecretShares int `json:"secret_shares"` + SecretThreshold int `json:"secret_threshold"` + StoredShares int `json:"stored_shares"` + PGPKeys []string `json:"pgp_keys"` + Backup bool + RequireVerification bool `json:"require_verification"` +} + +type RekeyStatusResponse struct { + Nonce string `json:"nonce"` + Started bool `json:"started"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` + Required int `json:"required"` + PGPFingerprints []string `json:"pgp_fingerprints"` + Backup bool `json:"backup"` + VerificationRequired bool `json:"verification_required"` + VerificationNonce string `json:"verification_nonce"` +} + +type RekeyUpdateResponse struct { + Nonce string `json:"nonce"` + Complete bool `json:"complete"` + Keys []string `json:"keys"` + KeysB64 []string `json:"keys_base64"` + PGPFingerprints []string `json:"pgp_fingerprints"` + Backup bool `json:"backup"` + VerificationRequired bool `json:"verification_required"` + VerificationNonce string `json:"verification_nonce,omitempty"` +} + +type RekeyRetrieveResponse struct { + Nonce string `json:"nonce" mapstructure:"nonce"` + Keys map[string][]string `json:"keys" mapstructure:"keys"` + KeysB64 map[string][]string `json:"keys_base64" mapstructure:"keys_base64"` +} + +type RekeyVerificationStatusResponse struct { + Nonce string `json:"nonce"` + Started bool `json:"started"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` +} + +type RekeyVerificationUpdateResponse struct { + Nonce string `json:"nonce"` + Complete bool `json:"complete"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_rotate.go b/vendor/github.com/hashicorp/vault/api/sys_rotate.go new file mode 100644 index 00000000000..fa86886c35b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_rotate.go @@ -0,0 +1,102 @@ +package api + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "time" +) + +func (c *Sys) Rotate() error { + return c.RotateWithContext(context.Background()) +} + +func (c *Sys) RotateWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/sys/rotate") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) KeyStatus() (*KeyStatus, error) { + return c.KeyStatusWithContext(context.Background()) +} + +func (c *Sys) KeyStatusWithContext(ctx context.Context) (*KeyStatus, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/key-status") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result KeyStatus + + termRaw, ok := secret.Data["term"] + if !ok { + return nil, errors.New("term not found in response") + } + term, ok := termRaw.(json.Number) + if !ok { + return nil, errors.New("could not convert term to a number") + } + term64, err := term.Int64() + if err != nil { + return nil, err + } + result.Term = int(term64) + + installTimeRaw, ok := 
secret.Data["install_time"] + if !ok { + return nil, errors.New("install_time not found in response") + } + installTimeStr, ok := installTimeRaw.(string) + if !ok { + return nil, errors.New("could not convert install_time to a string") + } + installTime, err := time.Parse(time.RFC3339Nano, installTimeStr) + if err != nil { + return nil, err + } + result.InstallTime = installTime + + encryptionsRaw, ok := secret.Data["encryptions"] + if ok { + encryptions, ok := encryptionsRaw.(json.Number) + if !ok { + return nil, errors.New("could not convert encryptions to a number") + } + encryptions64, err := encryptions.Int64() + if err != nil { + return nil, err + } + result.Encryptions = int(encryptions64) + } + + return &result, err +} + +type KeyStatus struct { + Term int `json:"term"` + InstallTime time.Time `json:"install_time"` + Encryptions int `json:"encryptions"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_seal.go b/vendor/github.com/hashicorp/vault/api/sys_seal.go new file mode 100644 index 00000000000..c772ae0fc26 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_seal.go @@ -0,0 +1,118 @@ +package api + +import ( + "context" + "net/http" +) + +func (c *Sys) SealStatus() (*SealStatusResponse, error) { + return c.SealStatusWithContext(context.Background()) +} + +func (c *Sys) SealStatusWithContext(ctx context.Context) (*SealStatusResponse, error) { + r := c.c.NewRequest(http.MethodGet, "/v1/sys/seal-status") + return sealStatusRequestWithContext(ctx, c, r) +} + +func (c *Sys) Seal() error { + return c.SealWithContext(context.Background()) +} + +func (c *Sys) SealWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/seal") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) ResetUnsealProcess() (*SealStatusResponse, error) { + return c.ResetUnsealProcessWithContext(context.Background()) +} + +func (c *Sys) ResetUnsealProcessWithContext(ctx context.Context) (*SealStatusResponse, error) { + body := map[string]interface{}{"reset": true} + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/unseal") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + return sealStatusRequestWithContext(ctx, c, r) +} + +func (c *Sys) Unseal(shard string) (*SealStatusResponse, error) { + return c.UnsealWithContext(context.Background(), shard) +} + +func (c *Sys) UnsealWithContext(ctx context.Context, shard string) (*SealStatusResponse, error) { + body := map[string]interface{}{"key": shard} + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/unseal") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + return sealStatusRequestWithContext(ctx, c, r) +} + +func (c *Sys) UnsealWithOptions(opts *UnsealOpts) (*SealStatusResponse, error) { + return c.UnsealWithOptionsWithContext(context.Background(), opts) +} + +func (c *Sys) UnsealWithOptionsWithContext(ctx context.Context, opts *UnsealOpts) (*SealStatusResponse, error) { + r := c.c.NewRequest(http.MethodPut, "/v1/sys/unseal") + + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + return sealStatusRequestWithContext(ctx, c, r) +} + +func sealStatusRequestWithContext(ctx context.Context, c *Sys, r *Request) (*SealStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != 
nil { + return nil, err + } + defer resp.Body.Close() + + var result SealStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type SealStatusResponse struct { + Type string `json:"type"` + Initialized bool `json:"initialized"` + Sealed bool `json:"sealed"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` + Nonce string `json:"nonce"` + Version string `json:"version"` + BuildDate string `json:"build_date"` + Migration bool `json:"migration"` + ClusterName string `json:"cluster_name,omitempty"` + ClusterID string `json:"cluster_id,omitempty"` + RecoverySeal bool `json:"recovery_seal"` + StorageType string `json:"storage_type,omitempty"` + HCPLinkStatus string `json:"hcp_link_status,omitempty"` + HCPLinkResourceID string `json:"hcp_link_resource_ID,omitempty"` +} + +type UnsealOpts struct { + Key string `json:"key"` + Reset bool `json:"reset"` + Migrate bool `json:"migrate"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_stepdown.go b/vendor/github.com/hashicorp/vault/api/sys_stepdown.go new file mode 100644 index 00000000000..833f31a6f76 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_stepdown.go @@ -0,0 +1,23 @@ +package api + +import ( + "context" + "net/http" +) + +func (c *Sys) StepDown() error { + return c.StepDownWithContext(context.Background()) +} + +func (c *Sys) StepDownWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/step-down") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + return err +} diff --git a/vendor/github.com/hashicorp/vault/sdk/LICENSE b/vendor/github.com/hashicorp/vault/sdk/LICENSE new file mode 100644 index 00000000000..f4f97ee5853 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/LICENSE @@ -0,0 +1,365 @@ +Copyright (c) 2015 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. 
"Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. 
You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go b/vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go new file mode 100644 index 00000000000..58ebc06f2d0 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go @@ -0,0 +1,1386 @@ +package certutil + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/pem" + "errors" + "fmt" + "io" + "io/ioutil" + "math/big" + "net" + "net/url" + "strconv" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/mitchellh/mapstructure" + "golang.org/x/crypto/cryptobyte" + cbasn1 "golang.org/x/crypto/cryptobyte/asn1" +) + +const rsaMinimumSecureKeySize = 2048 + +// Mapping of key types to default key lengths +var defaultAlgorithmKeyBits = map[string]int{ + "rsa": 2048, + "ec": 256, +} + +// Mapping of NIST P-Curve's key length to expected signature bits. +var expectedNISTPCurveHashBits = map[int]int{ + 224: 256, + 256: 256, + 384: 384, + 521: 512, +} + +// Mapping of constant names<->constant values for SignatureAlgorithm +var SignatureAlgorithmNames = map[string]x509.SignatureAlgorithm{ + "sha256withrsa": x509.SHA256WithRSA, + "sha384withrsa": x509.SHA384WithRSA, + "sha512withrsa": x509.SHA512WithRSA, + "ecdsawithsha256": x509.ECDSAWithSHA256, + "ecdsawithsha384": x509.ECDSAWithSHA384, + "ecdsawithsha512": x509.ECDSAWithSHA512, + "sha256withrsapss": x509.SHA256WithRSAPSS, + "sha384withrsapss": x509.SHA384WithRSAPSS, + "sha512withrsapss": x509.SHA512WithRSAPSS, + "pureed25519": x509.PureEd25519, + "ed25519": x509.PureEd25519, // Duplicated for clarity; most won't expect the "Pure" prefix. +} + +// Mapping of constant values<->constant names for SignatureAlgorithm +var InvSignatureAlgorithmNames = map[x509.SignatureAlgorithm]string{ + x509.SHA256WithRSA: "SHA256WithRSA", + x509.SHA384WithRSA: "SHA384WithRSA", + x509.SHA512WithRSA: "SHA512WithRSA", + x509.ECDSAWithSHA256: "ECDSAWithSHA256", + x509.ECDSAWithSHA384: "ECDSAWithSHA384", + x509.ECDSAWithSHA512: "ECDSAWithSHA512", + x509.SHA256WithRSAPSS: "SHA256WithRSAPSS", + x509.SHA384WithRSAPSS: "SHA384WithRSAPSS", + x509.SHA512WithRSAPSS: "SHA512WithRSAPSS", + x509.PureEd25519: "Ed25519", +} + +// OID for RFC 5280 Delta CRL Indicator CRL extension. +// +// > id-ce-deltaCRLIndicator OBJECT IDENTIFIER ::= { id-ce 27 } +var DeltaCRLIndicatorOID = asn1.ObjectIdentifier([]int{2, 5, 29, 27}) + +// GetHexFormatted returns the byte buffer formatted in hex with +// the specified separator between bytes. 
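+// For illustration, GetHexFormatted([]byte{0xde, 0xad, 0xbe, 0xef}, ":")
+// yields "de:ad:be:ef", the colon-separated form commonly used for
+// serial numbers and key IDs.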
+func GetHexFormatted(buf []byte, sep string) string {
+	var ret bytes.Buffer
+	for _, cur := range buf {
+		if ret.Len() > 0 {
+			// Fprint, not Fprintf: sep is caller-supplied data, not a format
+			// string, and may legitimately contain '%'.
+			fmt.Fprint(&ret, sep)
+		}
+		fmt.Fprintf(&ret, "%02x", cur)
+	}
+	return ret.String()
+}
+
+// ParseHexFormatted returns the raw bytes from a formatted hex string,
+// or nil if any segment does not parse as a single byte.
+func ParseHexFormatted(in, sep string) []byte {
+	var ret bytes.Buffer
+	var err error
+	var inBits uint64
+	inBytes := strings.Split(in, sep)
+	for _, inByte := range inBytes {
+		// An unsigned 8-bit parse accepts the full 0x00-0xff range; a signed
+		// one would reject any byte value above 0x7f.
+		if inBits, err = strconv.ParseUint(inByte, 16, 8); err != nil {
+			return nil
+		}
+		ret.WriteByte(byte(inBits))
+	}
+	return ret.Bytes()
+}
+
+// GetSubjKeyID returns the subject key ID. The computed ID is the SHA-1 hash of
+// the marshaled public key according to
+// https://tools.ietf.org/html/rfc5280#section-4.2.1.2 (1)
+func GetSubjKeyID(privateKey crypto.Signer) ([]byte, error) {
+	if privateKey == nil {
+		return nil, errutil.InternalError{Err: "passed-in private key is nil"}
+	}
+	return getSubjectKeyID(privateKey.Public())
+}
+
+// Returns the explicit SKID when used for cross-signing, else computes a new
+// SKID from the key itself.
+func getSubjectKeyIDFromBundle(data *CreationBundle) ([]byte, error) {
+	if len(data.Params.SKID) > 0 {
+		return data.Params.SKID, nil
+	}
+
+	return getSubjectKeyID(data.CSR.PublicKey)
+}
+
+func getSubjectKeyID(pub interface{}) ([]byte, error) {
+	var publicKeyBytes []byte
+	switch pub := pub.(type) {
+	case *rsa.PublicKey:
+		type pkcs1PublicKey struct {
+			N *big.Int
+			E int
+		}
+
+		var err error
+		publicKeyBytes, err = asn1.Marshal(pkcs1PublicKey{
+			N: pub.N,
+			E: pub.E,
+		})
+		if err != nil {
+			return nil, errutil.InternalError{Err: fmt.Sprintf("error marshalling public key: %s", err)}
+		}
+	case *ecdsa.PublicKey:
+		publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y)
+	case ed25519.PublicKey:
+		publicKeyBytes = pub
+	default:
+		return nil, errutil.InternalError{Err: fmt.Sprintf("unsupported public key type: %T", pub)}
+	}
+	skid := sha1.Sum(publicKeyBytes)
+	return skid[:], nil
+}
+
+// ParsePKIMap takes a map (for instance, the Secret.Data
+// returned from the PKI backend) and returns a ParsedCertBundle.
+func ParsePKIMap(data map[string]interface{}) (*ParsedCertBundle, error) {
+	result := &CertBundle{}
+	err := mapstructure.Decode(data, result)
+	if err != nil {
+		return nil, errutil.UserError{Err: err.Error()}
+	}
+
+	return result.ToParsedCertBundle()
+}
+
+// ParsePKIJSON takes a JSON-encoded string and returns a ParsedCertBundle.
+//
+// This can be either the output of an issue call from the PKI backend,
+// just its data member, or JSON not coming from the PKI backend.
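+// For illustration, the data member of an issue response carries fields such
+// as "certificate", "issuing_ca", and "private_key", which map directly onto
+// CertBundle.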
+func ParsePKIJSON(input []byte) (*ParsedCertBundle, error) { + result := &CertBundle{} + err := jsonutil.DecodeJSON(input, &result) + + if err == nil { + return result.ToParsedCertBundle() + } + + var secret Secret + err = jsonutil.DecodeJSON(input, &secret) + + if err == nil { + return ParsePKIMap(secret.Data) + } + + return nil, errutil.UserError{Err: "unable to parse out of either secret data or a secret object"} +} + +func ParseDERKey(privateKeyBytes []byte) (signer crypto.Signer, format BlockType, err error) { + var firstError error + if signer, firstError = x509.ParseECPrivateKey(privateKeyBytes); firstError == nil { + format = ECBlock + return + } + + var secondError error + if signer, secondError = x509.ParsePKCS1PrivateKey(privateKeyBytes); secondError == nil { + format = PKCS1Block + return + } + + var thirdError error + var rawKey interface{} + if rawKey, thirdError = x509.ParsePKCS8PrivateKey(privateKeyBytes); thirdError == nil { + switch rawSigner := rawKey.(type) { + case *rsa.PrivateKey: + signer = rawSigner + case *ecdsa.PrivateKey: + signer = rawSigner + case ed25519.PrivateKey: + signer = rawSigner + default: + return nil, UnknownBlock, errutil.InternalError{Err: "unknown type for parsed PKCS8 Private Key"} + } + + format = PKCS8Block + return + } + + return nil, UnknownBlock, fmt.Errorf("got errors attempting to parse DER private key:\n1. %v\n2. %v\n3. %v", firstError, secondError, thirdError) +} + +func ParsePEMKey(keyPem string) (crypto.Signer, BlockType, error) { + pemBlock, _ := pem.Decode([]byte(keyPem)) + if pemBlock == nil { + return nil, UnknownBlock, errutil.UserError{Err: "no data found in PEM block"} + } + + return ParseDERKey(pemBlock.Bytes) +} + +// ParsePEMBundle takes a string of concatenated PEM-format certificate +// and private key values and decodes/parses them, checking validity along +// the way. The first certificate must be the subject certificate and issuing +// certificates may follow. There must be at most one private key. 
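+// Encrypted private key blocks are rejected outright, and the assembled
+// bundle is checked with Verify before being returned.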
+func ParsePEMBundle(pemBundle string) (*ParsedCertBundle, error) { + if len(pemBundle) == 0 { + return nil, errutil.UserError{Err: "empty pem bundle"} + } + + pemBytes := []byte(pemBundle) + var pemBlock *pem.Block + parsedBundle := &ParsedCertBundle{} + var certPath []*CertBlock + + for len(pemBytes) > 0 { + pemBlock, pemBytes = pem.Decode(pemBytes) + if pemBlock == nil { + return nil, errutil.UserError{Err: "no data found in PEM block"} + } + + if signer, format, err := ParseDERKey(pemBlock.Bytes); err == nil { + if parsedBundle.PrivateKeyType != UnknownPrivateKey { + return nil, errutil.UserError{Err: "more than one private key given; provide only one private key in the bundle"} + } + + parsedBundle.PrivateKeyFormat = format + parsedBundle.PrivateKeyType = GetPrivateKeyTypeFromSigner(signer) + if parsedBundle.PrivateKeyType == UnknownPrivateKey { + return nil, errutil.UserError{Err: "Unknown type of private key included in the bundle: %v"} + } + + parsedBundle.PrivateKeyBytes = pemBlock.Bytes + parsedBundle.PrivateKey = signer + } else if certificates, err := x509.ParseCertificates(pemBlock.Bytes); err == nil { + certPath = append(certPath, &CertBlock{ + Certificate: certificates[0], + Bytes: pemBlock.Bytes, + }) + } else if x509.IsEncryptedPEMBlock(pemBlock) { + return nil, errutil.UserError{Err: "Encrypted private key given; provide only decrypted private key in the bundle"} + } + } + + for i, certBlock := range certPath { + if i == 0 { + parsedBundle.Certificate = certBlock.Certificate + parsedBundle.CertificateBytes = certBlock.Bytes + } else { + parsedBundle.CAChain = append(parsedBundle.CAChain, certBlock) + } + } + + if err := parsedBundle.Verify(); err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("verification of parsed bundle failed: %s", err)} + } + + return parsedBundle, nil +} + +// GeneratePrivateKey generates a private key with the specified type and key bits. +func GeneratePrivateKey(keyType string, keyBits int, container ParsedPrivateKeyContainer) error { + return generatePrivateKey(keyType, keyBits, container, nil) +} + +// GeneratePrivateKeyWithRandomSource generates a private key with the specified type and key bits. +// GeneratePrivateKeyWithRandomSource uses randomness from the entropyReader to generate the private key. +func GeneratePrivateKeyWithRandomSource(keyType string, keyBits int, container ParsedPrivateKeyContainer, entropyReader io.Reader) error { + return generatePrivateKey(keyType, keyBits, container, entropyReader) +} + +// generatePrivateKey generates a private key with the specified type and key bits. +// generatePrivateKey uses randomness from the entropyReader to generate the private key. +func generatePrivateKey(keyType string, keyBits int, container ParsedPrivateKeyContainer, entropyReader io.Reader) error { + var err error + var privateKeyType PrivateKeyType + var privateKeyBytes []byte + var privateKey crypto.Signer + + var randReader io.Reader = rand.Reader + if entropyReader != nil { + randReader = entropyReader + } + + switch keyType { + case "rsa": + // XXX: there is a false-positive CodeQL path here around keyBits; + // because of a default zero value in the TypeDurationSecond and + // TypeSignedDurationSecond cases of schema.DefaultOrZero(), it + // thinks it is possible to end up with < 2048 bit RSA Key here. + // While this is true for SSH keys, it isn't true for PKI keys + // due to ValidateKeyTypeLength(...) below. 
While we could close + // the report as a false-positive, enforcing a minimum keyBits size + // here of 2048 would ensure no other paths exist. + if keyBits < 2048 { + return errutil.InternalError{Err: fmt.Sprintf("insecure bit length for RSA private key: %d", keyBits)} + } + privateKeyType = RSAPrivateKey + privateKey, err = rsa.GenerateKey(randReader, keyBits) + if err != nil { + return errutil.InternalError{Err: fmt.Sprintf("error generating RSA private key: %v", err)} + } + privateKeyBytes = x509.MarshalPKCS1PrivateKey(privateKey.(*rsa.PrivateKey)) + case "ec": + privateKeyType = ECPrivateKey + var curve elliptic.Curve + switch keyBits { + case 224: + curve = elliptic.P224() + case 256: + curve = elliptic.P256() + case 384: + curve = elliptic.P384() + case 521: + curve = elliptic.P521() + default: + return errutil.UserError{Err: fmt.Sprintf("unsupported bit length for EC key: %d", keyBits)} + } + privateKey, err = ecdsa.GenerateKey(curve, randReader) + if err != nil { + return errutil.InternalError{Err: fmt.Sprintf("error generating EC private key: %v", err)} + } + privateKeyBytes, err = x509.MarshalECPrivateKey(privateKey.(*ecdsa.PrivateKey)) + if err != nil { + return errutil.InternalError{Err: fmt.Sprintf("error marshalling EC private key: %v", err)} + } + case "ed25519": + privateKeyType = Ed25519PrivateKey + _, privateKey, err = ed25519.GenerateKey(randReader) + if err != nil { + return errutil.InternalError{Err: fmt.Sprintf("error generating ed25519 private key: %v", err)} + } + privateKeyBytes, err = x509.MarshalPKCS8PrivateKey(privateKey.(ed25519.PrivateKey)) + if err != nil { + return errutil.InternalError{Err: fmt.Sprintf("error marshalling Ed25519 private key: %v", err)} + } + default: + return errutil.UserError{Err: fmt.Sprintf("unknown key type: %s", keyType)} + } + + container.SetParsedPrivateKey(privateKey, privateKeyType, privateKeyBytes) + return nil +} + +// GenerateSerialNumber generates a serial number suitable for a certificate +func GenerateSerialNumber() (*big.Int, error) { + return generateSerialNumber(rand.Reader) +} + +// GenerateSerialNumberWithRandomSource generates a serial number suitable +// for a certificate with custom entropy. +func GenerateSerialNumberWithRandomSource(randReader io.Reader) (*big.Int, error) { + return generateSerialNumber(randReader) +} + +func generateSerialNumber(randReader io.Reader) (*big.Int, error) { + serial, err := rand.Int(randReader, (&big.Int{}).Exp(big.NewInt(2), big.NewInt(159), nil)) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error generating serial number: %v", err)} + } + return serial, nil +} + +// ComparePublicKeysAndType compares two public keys and returns true if they match, +// false if their types or contents differ, and an error on unsupported key types. +func ComparePublicKeysAndType(key1Iface, key2Iface crypto.PublicKey) (bool, error) { + equal, err := ComparePublicKeys(key1Iface, key2Iface) + if err != nil { + if strings.Contains(err.Error(), "key types do not match:") { + return false, nil + } + } + + return equal, err +} + +// ComparePublicKeys compares two public keys and returns true if they match, +// returns an error if public key types are mismatched, or they are an unsupported key type. 
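+// For illustration: comparing an *rsa.PublicKey against an *ecdsa.PublicKey
+// returns a "key types do not match" error, while comparing two RSA keys
+// with different moduli returns (false, nil).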
+func ComparePublicKeys(key1Iface, key2Iface crypto.PublicKey) (bool, error) { + switch key1Iface.(type) { + case *rsa.PublicKey: + key1 := key1Iface.(*rsa.PublicKey) + key2, ok := key2Iface.(*rsa.PublicKey) + if !ok { + return false, fmt.Errorf("key types do not match: %T and %T", key1Iface, key2Iface) + } + if key1.N.Cmp(key2.N) != 0 || + key1.E != key2.E { + return false, nil + } + return true, nil + + case *ecdsa.PublicKey: + key1 := key1Iface.(*ecdsa.PublicKey) + key2, ok := key2Iface.(*ecdsa.PublicKey) + if !ok { + return false, fmt.Errorf("key types do not match: %T and %T", key1Iface, key2Iface) + } + if key1.X.Cmp(key2.X) != 0 || + key1.Y.Cmp(key2.Y) != 0 { + return false, nil + } + key1Params := key1.Params() + key2Params := key2.Params() + if key1Params.P.Cmp(key2Params.P) != 0 || + key1Params.N.Cmp(key2Params.N) != 0 || + key1Params.B.Cmp(key2Params.B) != 0 || + key1Params.Gx.Cmp(key2Params.Gx) != 0 || + key1Params.Gy.Cmp(key2Params.Gy) != 0 || + key1Params.BitSize != key2Params.BitSize { + return false, nil + } + return true, nil + case ed25519.PublicKey: + key1 := key1Iface.(ed25519.PublicKey) + key2, ok := key2Iface.(ed25519.PublicKey) + if !ok { + return false, fmt.Errorf("key types do not match: %T and %T", key1Iface, key2Iface) + } + if !key1.Equal(key2) { + return false, nil + } + return true, nil + default: + return false, fmt.Errorf("cannot compare key with type %T", key1Iface) + } +} + +// ParsePublicKeyPEM is used to parse RSA and ECDSA public keys from PEMs +func ParsePublicKeyPEM(data []byte) (interface{}, error) { + block, data := pem.Decode(data) + if block != nil { + if len(bytes.TrimSpace(data)) > 0 { + return nil, errutil.UserError{Err: "unexpected trailing data after parsed PEM block"} + } + var rawKey interface{} + var err error + if rawKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + rawKey = cert.PublicKey + } else { + return nil, err + } + } + + switch key := rawKey.(type) { + case *rsa.PublicKey: + return key, nil + case *ecdsa.PublicKey: + return key, nil + case ed25519.PublicKey: + return key, nil + } + } + return nil, errors.New("data does not contain any valid public keys") +} + +// AddPolicyIdentifiers adds certificate policies extension, based on CreationBundle +func AddPolicyIdentifiers(data *CreationBundle, certTemplate *x509.Certificate) { + oidOnly := true + for _, oidStr := range data.Params.PolicyIdentifiers { + oid, err := StringToOid(oidStr) + if err == nil { + certTemplate.PolicyIdentifiers = append(certTemplate.PolicyIdentifiers, oid) + } + if err != nil { + oidOnly = false + } + } + if !oidOnly { // Because all policy information is held in the same extension, when we use an extra extension to + // add policy qualifier information, that overwrites any information in the PolicyIdentifiers field on the Cert + // Template, so we need to reparse all the policy identifiers here + extension, err := CreatePolicyInformationExtensionFromStorageStrings(data.Params.PolicyIdentifiers) + if err == nil { + // If this errors out, don't add it, rely on the OIDs parsed into PolicyIdentifiers above + certTemplate.ExtraExtensions = append(certTemplate.ExtraExtensions, *extension) + } + } +} + +// AddExtKeyUsageOids adds custom extended key usage OIDs to certificate +func AddExtKeyUsageOids(data *CreationBundle, certTemplate *x509.Certificate) { + for _, oidstr := range data.Params.ExtKeyUsageOIDs { + oid, err := StringToOid(oidstr) + if err == nil { + 
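+			// Only OIDs that parse cleanly are added; malformed strings are
+			// skipped silently rather than failing the whole certificate.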
certTemplate.UnknownExtKeyUsage = append(certTemplate.UnknownExtKeyUsage, oid) + } + } +} + +func HandleOtherCSRSANs(in *x509.CertificateRequest, sans map[string][]string) error { + certTemplate := &x509.Certificate{ + DNSNames: in.DNSNames, + IPAddresses: in.IPAddresses, + EmailAddresses: in.EmailAddresses, + URIs: in.URIs, + } + if err := HandleOtherSANs(certTemplate, sans); err != nil { + return err + } + if len(certTemplate.ExtraExtensions) > 0 { + for _, v := range certTemplate.ExtraExtensions { + in.ExtraExtensions = append(in.ExtraExtensions, v) + } + } + return nil +} + +func HandleOtherSANs(in *x509.Certificate, sans map[string][]string) error { + // If other SANs is empty we return which causes normal Go stdlib parsing + // of the other SAN types + if len(sans) == 0 { + return nil + } + + var rawValues []asn1.RawValue + + // We need to generate an IMPLICIT sequence for compatibility with OpenSSL + // -- it's an open question what the default for RFC 5280 actually is, see + // https://github.com/openssl/openssl/issues/5091 -- so we have to use + // cryptobyte because using the asn1 package's marshaling always produces + // an EXPLICIT sequence. Note that asn1 is way too magical according to + // agl, and cryptobyte is modeled after the CBB/CBS bits that agl put into + // boringssl. + for oid, vals := range sans { + for _, val := range vals { + var b cryptobyte.Builder + oidStr, err := StringToOid(oid) + if err != nil { + return err + } + b.AddASN1ObjectIdentifier(oidStr) + b.AddASN1(cbasn1.Tag(0).ContextSpecific().Constructed(), func(b *cryptobyte.Builder) { + b.AddASN1(cbasn1.UTF8String, func(b *cryptobyte.Builder) { + b.AddBytes([]byte(val)) + }) + }) + m, err := b.Bytes() + if err != nil { + return err + } + rawValues = append(rawValues, asn1.RawValue{Tag: 0, Class: 2, IsCompound: true, Bytes: m}) + } + } + + // If other SANs is empty we return which causes normal Go stdlib parsing + // of the other SAN types + if len(rawValues) == 0 { + return nil + } + + // Append any existing SANs, sans marshalling + rawValues = append(rawValues, marshalSANs(in.DNSNames, in.EmailAddresses, in.IPAddresses, in.URIs)...) + + // Marshal and add to ExtraExtensions + ext := pkix.Extension{ + // This is the defined OID for subjectAltName + Id: asn1.ObjectIdentifier{2, 5, 29, 17}, + } + var err error + ext.Value, err = asn1.Marshal(rawValues) + if err != nil { + return err + } + in.ExtraExtensions = append(in.ExtraExtensions, ext) + + return nil +} + +// Note: Taken from the Go source code since it's not public, and used in the +// modified function below (which also uses these consts upstream) +const ( + nameTypeEmail = 1 + nameTypeDNS = 2 + nameTypeURI = 6 + nameTypeIP = 7 +) + +// Note: Taken from the Go source code since it's not public, plus changed to not marshal +// marshalSANs marshals a list of addresses into a the contents of an X.509 +// SubjectAlternativeName extension. +func marshalSANs(dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL) []asn1.RawValue { + var rawValues []asn1.RawValue + for _, name := range dnsNames { + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeDNS, Class: 2, Bytes: []byte(name)}) + } + for _, email := range emailAddresses { + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeEmail, Class: 2, Bytes: []byte(email)}) + } + for _, rawIP := range ipAddresses { + // If possible, we always want to encode IPv4 addresses in 4 bytes. 
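+		// To4 returns nil for anything that is not an IPv4 address, in which
+		// case the original 16-byte form is kept.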
+ ip := rawIP.To4() + if ip == nil { + ip = rawIP + } + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeIP, Class: 2, Bytes: ip}) + } + for _, uri := range uris { + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeURI, Class: 2, Bytes: []byte(uri.String())}) + } + return rawValues +} + +func StringToOid(in string) (asn1.ObjectIdentifier, error) { + split := strings.Split(in, ".") + ret := make(asn1.ObjectIdentifier, 0, len(split)) + for _, v := range split { + i, err := strconv.Atoi(v) + if err != nil { + return nil, err + } + ret = append(ret, i) + } + return asn1.ObjectIdentifier(ret), nil +} + +// Returns default key bits for the specified key type, or the present value +// if keyBits is non-zero. +func DefaultOrValueKeyBits(keyType string, keyBits int) (int, error) { + if keyBits == 0 { + newValue, present := defaultAlgorithmKeyBits[keyType] + if present { + keyBits = newValue + } /* else { + // We cannot return an error here as ed25519 (and potentially ed448 + // in the future) aren't in defaultAlgorithmKeyBits -- the value of + // the keyBits parameter is ignored under that algorithm. + } */ + } + + return keyBits, nil +} + +// Returns default signature hash bit length for the specified key type and +// bits, or the present value if hashBits is non-zero. Returns an error under +// certain internal circumstances. +func DefaultOrValueHashBits(keyType string, keyBits int, hashBits int) (int, error) { + if keyType == "ec" { + // Enforcement of curve moved to selectSignatureAlgorithmForECDSA. See + // note there about why. + } else if keyType == "rsa" && hashBits == 0 { + // To match previous behavior (and ignoring NIST's recommendations for + // hash size to align with RSA key sizes), default to SHA-2-256. + hashBits = 256 + } else if keyType == "ed25519" || keyType == "ed448" || keyType == "any" { + // No-op; ed25519 and ed448 internally specify their own hash and + // we do not need to select one. Double hashing isn't supported in + // certificate signing. Additionally, the any key type can't know + // what hash algorithm to use yet, so default to zero. + return 0, nil + } + + return hashBits, nil +} + +// Validates that the combination of keyType, keyBits, and hashBits are +// valid together; replaces individual calls to ValidateSignatureLength and +// ValidateKeyTypeLength. Also updates the value of keyBits and hashBits on +// return. +func ValidateDefaultOrValueKeyTypeSignatureLength(keyType string, keyBits int, hashBits int) (int, int, error) { + var err error + + if keyBits, err = DefaultOrValueKeyBits(keyType, keyBits); err != nil { + return keyBits, hashBits, err + } + + if err = ValidateKeyTypeLength(keyType, keyBits); err != nil { + return keyBits, hashBits, err + } + + if hashBits, err = DefaultOrValueHashBits(keyType, keyBits, hashBits); err != nil { + return keyBits, hashBits, err + } + + // Note that this check must come after we've selected a value for + // hashBits above, in the event it was left as the default, but we + // were allowed to update it. + if err = ValidateSignatureLength(keyType, hashBits); err != nil { + return keyBits, hashBits, err + } + + return keyBits, hashBits, nil +} + +// Validates that the length of the hash (in bits) used in the signature +// calculation is a known, approved value. +func ValidateSignatureLength(keyType string, hashBits int) error { + if keyType == "any" || keyType == "ec" || keyType == "ed25519" || keyType == "ed448" { + // ed25519 and ed448 include built-in hashing and is not externally + // configurable. 
There are three modes for each of these schemes: + // + // 1. Built-in hash (default, used in TLS, x509). + // 2. Double hash (notably used in some block-chain implementations, + // but largely regarded as a specialized use case with security + // concerns). + // 3. No hash (bring your own hash function, less commonly used). + // + // In all cases, we won't have a hash algorithm to validate here, so + // return nil. + // + // Additionally, when KeyType is any, we can't yet validate the + // signature algorithm size, so it takes the default zero value. + // + // When KeyType is ec, we also can't validate this value as we're + // forcefully ignoring the users' choice and specifying a value based + // on issuer type. + return nil + } + + switch hashBits { + case 256: + case 384: + case 512: + default: + return fmt.Errorf("unsupported hash signature algorithm: %d", hashBits) + } + + return nil +} + +func ValidateKeyTypeLength(keyType string, keyBits int) error { + switch keyType { + case "rsa": + if keyBits < rsaMinimumSecureKeySize { + return fmt.Errorf("RSA keys < %d bits are unsafe and not supported: got %d", rsaMinimumSecureKeySize, keyBits) + } + + switch keyBits { + case 2048: + case 3072: + case 4096: + case 8192: + default: + return fmt.Errorf("unsupported bit length for RSA key: %d", keyBits) + } + case "ec": + _, present := expectedNISTPCurveHashBits[keyBits] + if !present { + return fmt.Errorf("unsupported bit length for EC key: %d", keyBits) + } + case "any", "ed25519": + default: + return fmt.Errorf("unknown key type %s", keyType) + } + + return nil +} + +// CreateCertificate uses CreationBundle and the default rand.Reader to +// generate a cert/keypair. +func CreateCertificate(data *CreationBundle) (*ParsedCertBundle, error) { + return createCertificate(data, rand.Reader, generatePrivateKey) +} + +// CreateCertificateWithRandomSource uses CreationBundle and a custom +// io.Reader for randomness to generate a cert/keypair. +func CreateCertificateWithRandomSource(data *CreationBundle, randReader io.Reader) (*ParsedCertBundle, error) { + return createCertificate(data, randReader, generatePrivateKey) +} + +// KeyGenerator Allow us to override how/what generates the private key +type KeyGenerator func(keyType string, keyBits int, container ParsedPrivateKeyContainer, entropyReader io.Reader) error + +func CreateCertificateWithKeyGenerator(data *CreationBundle, randReader io.Reader, keyGenerator KeyGenerator) (*ParsedCertBundle, error) { + return createCertificate(data, randReader, keyGenerator) +} + +// Set correct RSA sig algo +func certTemplateSetSigAlgo(certTemplate *x509.Certificate, data *CreationBundle) { + if data.Params.UsePSS { + switch data.Params.SignatureBits { + case 256: + certTemplate.SignatureAlgorithm = x509.SHA256WithRSAPSS + case 384: + certTemplate.SignatureAlgorithm = x509.SHA384WithRSAPSS + case 512: + certTemplate.SignatureAlgorithm = x509.SHA512WithRSAPSS + } + } else { + switch data.Params.SignatureBits { + case 256: + certTemplate.SignatureAlgorithm = x509.SHA256WithRSA + case 384: + certTemplate.SignatureAlgorithm = x509.SHA384WithRSA + case 512: + certTemplate.SignatureAlgorithm = x509.SHA512WithRSA + } + } +} + +// selectSignatureAlgorithmForRSA returns the proper x509.SignatureAlgorithm based on various properties set in the +// Creation Bundle parameter. This method will default to a SHA256 signature algorithm if the requested signature +// bits is not set/unknown. 
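+// For illustration: SignatureBits of 384 with UsePSS set selects
+// x509.SHA384WithRSAPSS, while an unset (zero) value falls back to
+// x509.SHA256WithRSA, or x509.SHA256WithRSAPSS when UsePSS is set.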
+func selectSignatureAlgorithmForRSA(data *CreationBundle) x509.SignatureAlgorithm { + if data.Params.UsePSS { + switch data.Params.SignatureBits { + case 256: + return x509.SHA256WithRSAPSS + case 384: + return x509.SHA384WithRSAPSS + case 512: + return x509.SHA512WithRSAPSS + default: + return x509.SHA256WithRSAPSS + } + } + + switch data.Params.SignatureBits { + case 256: + return x509.SHA256WithRSA + case 384: + return x509.SHA384WithRSA + case 512: + return x509.SHA512WithRSA + default: + return x509.SHA256WithRSA + } +} + +func createCertificate(data *CreationBundle, randReader io.Reader, privateKeyGenerator KeyGenerator) (*ParsedCertBundle, error) { + var err error + result := &ParsedCertBundle{} + + serialNumber, err := GenerateSerialNumber() + if err != nil { + return nil, err + } + + if err := privateKeyGenerator(data.Params.KeyType, + data.Params.KeyBits, + result, randReader); err != nil { + return nil, err + } + + subjKeyID, err := GetSubjKeyID(result.PrivateKey) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error getting subject key ID: %s", err)} + } + + certTemplate := &x509.Certificate{ + SerialNumber: serialNumber, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: data.Params.NotAfter, + IsCA: false, + SubjectKeyId: subjKeyID, + Subject: data.Params.Subject, + DNSNames: data.Params.DNSNames, + EmailAddresses: data.Params.EmailAddresses, + IPAddresses: data.Params.IPAddresses, + URIs: data.Params.URIs, + } + if data.Params.NotBeforeDuration > 0 { + certTemplate.NotBefore = time.Now().Add(-1 * data.Params.NotBeforeDuration) + } + + if err := HandleOtherSANs(certTemplate, data.Params.OtherSANs); err != nil { + return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()} + } + + // Add this before calling addKeyUsages + if data.SigningBundle == nil { + certTemplate.IsCA = true + } else if data.Params.BasicConstraintsValidForNonCA { + certTemplate.BasicConstraintsValid = true + certTemplate.IsCA = false + } + + // This will only be filled in from the generation paths + if len(data.Params.PermittedDNSDomains) > 0 { + certTemplate.PermittedDNSDomains = data.Params.PermittedDNSDomains + certTemplate.PermittedDNSDomainsCritical = true + } + + AddPolicyIdentifiers(data, certTemplate) + + AddKeyUsages(data, certTemplate) + + AddExtKeyUsageOids(data, certTemplate) + + certTemplate.IssuingCertificateURL = data.Params.URLs.IssuingCertificates + certTemplate.CRLDistributionPoints = data.Params.URLs.CRLDistributionPoints + certTemplate.OCSPServer = data.Params.URLs.OCSPServers + + var certBytes []byte + if data.SigningBundle != nil { + privateKeyType := data.SigningBundle.PrivateKeyType + if privateKeyType == ManagedPrivateKey { + privateKeyType = GetPrivateKeyTypeFromSigner(data.SigningBundle.PrivateKey) + } + switch privateKeyType { + case RSAPrivateKey: + certTemplateSetSigAlgo(certTemplate, data) + case Ed25519PrivateKey: + certTemplate.SignatureAlgorithm = x509.PureEd25519 + case ECPrivateKey: + certTemplate.SignatureAlgorithm = selectSignatureAlgorithmForECDSA(data.SigningBundle.PrivateKey.Public(), data.Params.SignatureBits) + } + + caCert := data.SigningBundle.Certificate + certTemplate.AuthorityKeyId = caCert.SubjectKeyId + + certBytes, err = x509.CreateCertificate(randReader, certTemplate, caCert, result.PrivateKey.Public(), data.SigningBundle.PrivateKey) + } else { + // Creating a self-signed root + if data.Params.MaxPathLength == 0 { + certTemplate.MaxPathLen = 0 + certTemplate.MaxPathLenZero = 
true + } else { + certTemplate.MaxPathLen = data.Params.MaxPathLength + } + + switch data.Params.KeyType { + case "rsa": + certTemplateSetSigAlgo(certTemplate, data) + case "ed25519": + certTemplate.SignatureAlgorithm = x509.PureEd25519 + case "ec": + certTemplate.SignatureAlgorithm = selectSignatureAlgorithmForECDSA(result.PrivateKey.Public(), data.Params.SignatureBits) + } + + certTemplate.AuthorityKeyId = subjKeyID + certTemplate.BasicConstraintsValid = true + certBytes, err = x509.CreateCertificate(randReader, certTemplate, certTemplate, result.PrivateKey.Public(), result.PrivateKey) + } + + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)} + } + + result.CertificateBytes = certBytes + result.Certificate, err = x509.ParseCertificate(certBytes) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %s", err)} + } + + if data.SigningBundle != nil { + if (len(data.SigningBundle.Certificate.AuthorityKeyId) > 0 && + !bytes.Equal(data.SigningBundle.Certificate.AuthorityKeyId, data.SigningBundle.Certificate.SubjectKeyId)) || + data.Params.ForceAppendCaChain { + var chain []*CertBlock + + signingChain := data.SigningBundle.CAChain + // Some bundles already include the root included in the chain, so don't include it twice. + if len(signingChain) == 0 || !bytes.Equal(signingChain[0].Bytes, data.SigningBundle.CertificateBytes) { + chain = append(chain, &CertBlock{ + Certificate: data.SigningBundle.Certificate, + Bytes: data.SigningBundle.CertificateBytes, + }) + } + + if len(signingChain) > 0 { + chain = append(chain, signingChain...) + } + + result.CAChain = chain + } + } + + return result, nil +} + +func selectSignatureAlgorithmForECDSA(pub crypto.PublicKey, signatureBits int) x509.SignatureAlgorithm { + // Previously we preferred the user-specified signature bits for ECDSA + // keys. However, this could result in using a longer hash function than + // the underlying NIST P-curve will encode (e.g., a SHA-512 hash with a + // P-256 key). This isn't ideal: the hash is implicitly truncated + // (effectively turning it into SHA-512/256) and we then need to rely + // on the prefix security of the hash. Since both NIST and Mozilla guidance + // suggest instead using the correct hash function, we should prefer that + // over the operator-specified signatureBits. + // + // Lastly, note that pub above needs to be the _signer's_ public key; + // the issue with DefaultOrValueHashBits is that it is called at role + // configuration time, which might _precede_ issuer generation. Thus + // it only has access to the desired key type and not the actual issuer. + // The reference from that function is reproduced below: + // + // > To comply with BSI recommendations Section 4.2 and Mozilla root + // > store policy section 5.1.2, enforce that NIST P-curves use a hash + // > length corresponding to curve length. Note that ed25519 does not + // > implement the "ec" key type. + key, ok := pub.(*ecdsa.PublicKey) + if !ok { + return x509.ECDSAWithSHA256 + } + switch key.Curve { + case elliptic.P224(), elliptic.P256(): + return x509.ECDSAWithSHA256 + case elliptic.P384(): + return x509.ECDSAWithSHA384 + case elliptic.P521(): + return x509.ECDSAWithSHA512 + default: + return x509.ECDSAWithSHA256 + } +} + +var ( + oidExtensionBasicConstraints = []int{2, 5, 29, 19} + oidExtensionSubjectAltName = []int{2, 5, 29, 17} +) + +// CreateCSR creates a CSR with the default rand.Reader to +// generate a cert/keypair. 
This is currently only meant
+// for use when generating an intermediate certificate.
+func CreateCSR(data *CreationBundle, addBasicConstraints bool) (*ParsedCSRBundle, error) {
+	return createCSR(data, addBasicConstraints, rand.Reader, generatePrivateKey)
+}
+
+// CreateCSRWithRandomSource creates a CSR with a custom io.Reader
+// for randomness to generate a cert/keypair.
+func CreateCSRWithRandomSource(data *CreationBundle, addBasicConstraints bool, randReader io.Reader) (*ParsedCSRBundle, error) {
+	return createCSR(data, addBasicConstraints, randReader, generatePrivateKey)
+}
+
+// CreateCSRWithKeyGenerator creates a CSR with a custom io.Reader
+// for randomness to generate a cert/keypair with the provided private key generator.
+func CreateCSRWithKeyGenerator(data *CreationBundle, addBasicConstraints bool, randReader io.Reader, keyGenerator KeyGenerator) (*ParsedCSRBundle, error) {
+	return createCSR(data, addBasicConstraints, randReader, keyGenerator)
+}
+
+func createCSR(data *CreationBundle, addBasicConstraints bool, randReader io.Reader, keyGenerator KeyGenerator) (*ParsedCSRBundle, error) {
+	var err error
+	result := &ParsedCSRBundle{}
+
+	if err := keyGenerator(data.Params.KeyType,
+		data.Params.KeyBits,
+		result, randReader); err != nil {
+		return nil, err
+	}
+
+	// Like many root CAs, other information is ignored
+	csrTemplate := &x509.CertificateRequest{
+		Subject:        data.Params.Subject,
+		DNSNames:       data.Params.DNSNames,
+		EmailAddresses: data.Params.EmailAddresses,
+		IPAddresses:    data.Params.IPAddresses,
+		URIs:           data.Params.URIs,
+	}
+
+	if err := HandleOtherCSRSANs(csrTemplate, data.Params.OtherSANs); err != nil {
+		return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()}
+	}
+
+	if addBasicConstraints {
+		type basicConstraints struct {
+			IsCA       bool `asn1:"optional"`
+			MaxPathLen int  `asn1:"optional,default:-1"`
+		}
+		val, err := asn1.Marshal(basicConstraints{IsCA: true, MaxPathLen: -1})
+		if err != nil {
+			return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling basic constraints: {{err}}", err).Error()}
+		}
+		ext := pkix.Extension{
+			Id:       oidExtensionBasicConstraints,
+			Value:    val,
+			Critical: true,
+		}
+		csrTemplate.ExtraExtensions = append(csrTemplate.ExtraExtensions, ext)
+	}
+
+	switch data.Params.KeyType {
+	case "rsa":
+		// use specified RSA algorithm defaulting to the appropriate SHA256 RSA signature type
+		csrTemplate.SignatureAlgorithm = selectSignatureAlgorithmForRSA(data)
+	case "ec":
+		csrTemplate.SignatureAlgorithm = selectSignatureAlgorithmForECDSA(result.PrivateKey.Public(), data.Params.SignatureBits)
+	case "ed25519":
+		csrTemplate.SignatureAlgorithm = x509.PureEd25519
+	}
+
+	csr, err := x509.CreateCertificateRequest(randReader, csrTemplate, result.PrivateKey)
+	if err != nil {
+		return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate request: %s", err)}
+	}
+
+	result.CSRBytes = csr
+	result.CSR, err = x509.ParseCertificateRequest(csr)
+	if err != nil {
+		return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate request: %v", err)}
+	}
+
+	if err = result.CSR.CheckSignature(); err != nil {
+		return nil, errors.New("failed signature validation for CSR")
+	}
+
+	return result, nil
+}
+
+// SignCertificate performs the heavy lifting
+// of generating a certificate from a CSR.
+// Returns a ParsedCertBundle sans private keys.
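+//
+// A minimal call-site sketch (illustrative only; params, caInfo, and csr are
+// hypothetical values assumed to have been prepared by the caller):
+//
+//	parsed, err := certutil.SignCertificate(&certutil.CreationBundle{
+//		Params:        params, // *certutil.CreationParameters
+//		SigningBundle: caInfo, // *certutil.CAInfoBundle with the CA key material
+//		CSR:           csr,    // *x509.CertificateRequest
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	bundle, err := parsed.ToCertBundle() // PEM-encoded result, sans private key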
+func SignCertificate(data *CreationBundle) (*ParsedCertBundle, error) { + return signCertificate(data, rand.Reader) +} + +// SignCertificateWithRandomSource generates a certificate +// from a CSR, using custom randomness from the randReader. +// Returns a ParsedCertBundle sans private keys. +func SignCertificateWithRandomSource(data *CreationBundle, randReader io.Reader) (*ParsedCertBundle, error) { + return signCertificate(data, randReader) +} + +func signCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertBundle, error) { + switch { + case data == nil: + return nil, errutil.UserError{Err: "nil data bundle given to signCertificate"} + case data.Params == nil: + return nil, errutil.UserError{Err: "nil parameters given to signCertificate"} + case data.SigningBundle == nil: + return nil, errutil.UserError{Err: "nil signing bundle given to signCertificate"} + case data.CSR == nil: + return nil, errutil.UserError{Err: "nil csr given to signCertificate"} + } + + err := data.CSR.CheckSignature() + if err != nil { + return nil, errutil.UserError{Err: "request signature invalid"} + } + + result := &ParsedCertBundle{} + + serialNumber, err := GenerateSerialNumber() + if err != nil { + return nil, err + } + + subjKeyID, err := getSubjectKeyIDFromBundle(data) + if err != nil { + return nil, err + } + + caCert := data.SigningBundle.Certificate + + certTemplate := &x509.Certificate{ + SerialNumber: serialNumber, + Subject: data.Params.Subject, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: data.Params.NotAfter, + SubjectKeyId: subjKeyID[:], + AuthorityKeyId: caCert.SubjectKeyId, + } + if data.Params.NotBeforeDuration > 0 { + certTemplate.NotBefore = time.Now().Add(-1 * data.Params.NotBeforeDuration) + } + + privateKeyType := data.SigningBundle.PrivateKeyType + if privateKeyType == ManagedPrivateKey { + privateKeyType = GetPrivateKeyTypeFromSigner(data.SigningBundle.PrivateKey) + } + + switch privateKeyType { + case RSAPrivateKey: + certTemplateSetSigAlgo(certTemplate, data) + case ECPrivateKey: + switch data.Params.SignatureBits { + case 256: + certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256 + case 384: + certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA384 + case 512: + certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA512 + } + } + + if data.Params.UseCSRValues { + certTemplate.Subject = data.CSR.Subject + certTemplate.Subject.ExtraNames = certTemplate.Subject.Names + + certTemplate.DNSNames = data.CSR.DNSNames + certTemplate.EmailAddresses = data.CSR.EmailAddresses + certTemplate.IPAddresses = data.CSR.IPAddresses + certTemplate.URIs = data.CSR.URIs + + for _, name := range data.CSR.Extensions { + if !name.Id.Equal(oidExtensionBasicConstraints) && !(len(data.Params.OtherSANs) > 0 && name.Id.Equal(oidExtensionSubjectAltName)) { + certTemplate.ExtraExtensions = append(certTemplate.ExtraExtensions, name) + } + } + + } else { + certTemplate.DNSNames = data.Params.DNSNames + certTemplate.EmailAddresses = data.Params.EmailAddresses + certTemplate.IPAddresses = data.Params.IPAddresses + certTemplate.URIs = data.Params.URIs + } + + if err := HandleOtherSANs(certTemplate, data.Params.OtherSANs); err != nil { + return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()} + } + + AddPolicyIdentifiers(data, certTemplate) + + AddKeyUsages(data, certTemplate) + + AddExtKeyUsageOids(data, certTemplate) + + var certBytes []byte + + certTemplate.IssuingCertificateURL = data.Params.URLs.IssuingCertificates + 
certTemplate.CRLDistributionPoints = data.Params.URLs.CRLDistributionPoints
+	certTemplate.OCSPServer = data.SigningBundle.URLs.OCSPServers
+
+	if data.Params.IsCA {
+		certTemplate.BasicConstraintsValid = true
+		certTemplate.IsCA = true
+
+		if data.SigningBundle.Certificate.MaxPathLen == 0 &&
+			data.SigningBundle.Certificate.MaxPathLenZero {
+			return nil, errutil.UserError{Err: "signing certificate has a max path length of zero, and cannot issue further CA certificates"}
+		}
+
+		certTemplate.MaxPathLen = data.Params.MaxPathLength
+		if certTemplate.MaxPathLen == 0 {
+			certTemplate.MaxPathLenZero = true
+		}
+	} else if data.Params.BasicConstraintsValidForNonCA {
+		certTemplate.BasicConstraintsValid = true
+		certTemplate.IsCA = false
+	}
+
+	if len(data.Params.PermittedDNSDomains) > 0 {
+		certTemplate.PermittedDNSDomains = data.Params.PermittedDNSDomains
+		certTemplate.PermittedDNSDomainsCritical = true
+	}
+
+	certBytes, err = x509.CreateCertificate(randReader, certTemplate, caCert, data.CSR.PublicKey, data.SigningBundle.PrivateKey)
+
+	if err != nil {
+		return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)}
+	}
+
+	result.CertificateBytes = certBytes
+	result.Certificate, err = x509.ParseCertificate(certBytes)
+	if err != nil {
+		return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %s", err)}
+	}
+
+	result.CAChain = data.SigningBundle.GetFullChain()
+
+	return result, nil
+}
+
+func NewCertPool(reader io.Reader) (*x509.CertPool, error) {
+	pemBlock, err := ioutil.ReadAll(reader)
+	if err != nil {
+		return nil, err
+	}
+	certs, err := parseCertsPEM(pemBlock)
+	if err != nil {
+		return nil, fmt.Errorf("error reading certs: %s", err)
+	}
+	pool := x509.NewCertPool()
+	for _, cert := range certs {
+		pool.AddCert(cert)
+	}
+	return pool, nil
+}
+
+// parseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array
+// Returns an error if a certificate could not be parsed, or if the data does not contain any certificates
+func parseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) {
+	ok := false
+	certs := []*x509.Certificate{}
+	for len(pemCerts) > 0 {
+		var block *pem.Block
+		block, pemCerts = pem.Decode(pemCerts)
+		if block == nil {
+			break
+		}
+		// Only use PEM "CERTIFICATE" blocks without extra headers
+		if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
+			continue
+		}
+
+		cert, err := x509.ParseCertificate(block.Bytes)
+		if err != nil {
+			return certs, err
+		}
+
+		certs = append(certs, cert)
+		ok = true
+	}
+
+	if !ok {
+		return certs, errors.New("data does not contain any valid RSA or ECDSA certificates")
+	}
+	return certs, nil
+}
+
+// GetPublicKeySize returns the key size in bits for a given arbitrary crypto.PublicKey
+// Returns -1 for an unsupported key type.
+func GetPublicKeySize(key crypto.PublicKey) int {
+	if key, ok := key.(*rsa.PublicKey); ok {
+		return key.Size() * 8
+	}
+	if key, ok := key.(*ecdsa.PublicKey); ok {
+		return key.Params().BitSize
+	}
+	if key, ok := key.(ed25519.PublicKey); ok {
+		return len(key) * 8
+	}
+	if key, ok := key.(dsa.PublicKey); ok {
+		return key.Y.BitLen()
+	}
+
+	return -1
+}
+
+// CreateKeyBundle creates a KeyBundle struct object which includes a generated key
+// of keyType with keyBits leveraging the randomness from randReader.
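+//
+// A minimal usage sketch (illustrative only; error handling elided, and
+// crypto/rand is assumed to be imported):
+//
+//	kb, err := certutil.CreateKeyBundle("rsa", 2048, rand.Reader)
+//	if err != nil {
+//		// handle error
+//	}
+//	pemKey, err := kb.ToPrivateKeyPemString()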
+func CreateKeyBundle(keyType string, keyBits int, randReader io.Reader) (KeyBundle, error) {
+	return CreateKeyBundleWithKeyGenerator(keyType, keyBits, randReader, generatePrivateKey)
+}
+
+// CreateKeyBundleWithKeyGenerator creates a KeyBundle struct object which includes
+// a generated key of keyType with keyBits leveraging the randomness from randReader and
+// delegates the actual key generation to keyGenerator
+func CreateKeyBundleWithKeyGenerator(keyType string, keyBits int, randReader io.Reader, keyGenerator KeyGenerator) (KeyBundle, error) {
+	result := KeyBundle{}
+	if err := keyGenerator(keyType, keyBits, &result, randReader); err != nil {
+		return result, err
+	}
+	return result, nil
+}
+
+// CreateDeltaCRLIndicatorExt allows creating correctly formed delta CRLs
+// that point back to the last complete CRL that they're based on.
+func CreateDeltaCRLIndicatorExt(completeCRLNumber int64) (pkix.Extension, error) {
+	bigNum := big.NewInt(completeCRLNumber)
+	bigNumValue, err := asn1.Marshal(bigNum)
+	if err != nil {
+		return pkix.Extension{}, fmt.Errorf("unable to marshal complete CRL number (%v): %v", completeCRLNumber, err)
+	}
+	return pkix.Extension{
+		Id: DeltaCRLIndicatorOID,
+		// > When a conforming CRL issuer generates a delta CRL, the delta
+		// > CRL MUST include a critical delta CRL indicator extension.
+		Critical: true,
+		// This extension only includes the complete CRL number:
+		//
+		// > BaseCRLNumber ::= CRLNumber
+		//
+		// But, this needs to be encoded as a big number for encoding/asn1
+		// to work properly.
+		Value: bigNumValue,
+	}, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go b/vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go
new file mode 100644
index 00000000000..15b816f0c8e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go
@@ -0,0 +1,1015 @@
+// Package certutil contains helper functions that are mostly used
+// with the PKI backend but can be generally useful. Functionality
+// includes helpers for converting a certificate/private key bundle
+// between DER and PEM, printing certificate serial numbers, and more.
+//
+// Functionality specific to the PKI backend includes some types
+// and helper methods to make requesting certificates from the
+// backend easy.
+package certutil
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/ed25519"
+	"crypto/rsa"
+	"crypto/tls"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"math/big"
+	"net"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/vault/sdk/helper/errutil"
+)
+
+const (
+	PrivateKeyTypeP521 = "p521"
+)
+
+// ClusterKeyParams can describe one of a few key types, so the different params may or may not be filled
+type ClusterKeyParams struct {
+	Type string   `json:"type" structs:"type" mapstructure:"type"`
+	X    *big.Int `json:"x" structs:"x" mapstructure:"x"`
+	Y    *big.Int `json:"y" structs:"y" mapstructure:"y"`
+	D    *big.Int `json:"d" structs:"d" mapstructure:"d"`
+}
+
+// Secret is used to attempt to unmarshal a Vault secret
+// JSON response, as a convenience
+type Secret struct {
+	Data map[string]interface{} `json:"data"`
+}
+
+// PrivateKeyType holds a string representation of the type of private key (ec
+// or rsa) referenced in CertBundle and ParsedCertBundle.
This uses colloquial +// names rather than official names, to eliminate confusion +type PrivateKeyType string + +// Well-known PrivateKeyTypes +const ( + UnknownPrivateKey PrivateKeyType = "" + RSAPrivateKey PrivateKeyType = "rsa" + ECPrivateKey PrivateKeyType = "ec" + Ed25519PrivateKey PrivateKeyType = "ed25519" + ManagedPrivateKey PrivateKeyType = "ManagedPrivateKey" +) + +// TLSUsage controls whether the intended usage of a *tls.Config +// returned from ParsedCertBundle.getTLSConfig is for server use, +// client use, or both, which affects which values are set +type TLSUsage int + +// Well-known TLSUsage types +const ( + TLSUnknown TLSUsage = 0 + TLSServer TLSUsage = 1 << iota + TLSClient +) + +// BlockType indicates the serialization format of the key +type BlockType string + +// Well-known formats +const ( + UnknownBlock BlockType = "" + PKCS1Block BlockType = "RSA PRIVATE KEY" + PKCS8Block BlockType = "PRIVATE KEY" + ECBlock BlockType = "EC PRIVATE KEY" +) + +// ParsedPrivateKeyContainer allows common key setting for certs and CSRs +type ParsedPrivateKeyContainer interface { + SetParsedPrivateKey(crypto.Signer, PrivateKeyType, []byte) +} + +// CertBlock contains the DER-encoded certificate and the PEM +// block's byte array +type CertBlock struct { + Certificate *x509.Certificate + Bytes []byte +} + +// CertBundle contains a key type, a PEM-encoded private key, +// a PEM-encoded certificate, and a string-encoded serial number, +// returned from a successful Issue request +type CertBundle struct { + PrivateKeyType PrivateKeyType `json:"private_key_type" structs:"private_key_type" mapstructure:"private_key_type"` + Certificate string `json:"certificate" structs:"certificate" mapstructure:"certificate"` + IssuingCA string `json:"issuing_ca" structs:"issuing_ca" mapstructure:"issuing_ca"` + CAChain []string `json:"ca_chain" structs:"ca_chain" mapstructure:"ca_chain"` + PrivateKey string `json:"private_key" structs:"private_key" mapstructure:"private_key"` + SerialNumber string `json:"serial_number" structs:"serial_number" mapstructure:"serial_number"` +} + +// ParsedCertBundle contains a key type, a DER-encoded private key, +// and a DER-encoded certificate +type ParsedCertBundle struct { + PrivateKeyType PrivateKeyType + PrivateKeyFormat BlockType + PrivateKeyBytes []byte + PrivateKey crypto.Signer + CertificateBytes []byte + Certificate *x509.Certificate + CAChain []*CertBlock +} + +// CSRBundle contains a key type, a PEM-encoded private key, +// and a PEM-encoded CSR +type CSRBundle struct { + PrivateKeyType PrivateKeyType `json:"private_key_type" structs:"private_key_type" mapstructure:"private_key_type"` + CSR string `json:"csr" structs:"csr" mapstructure:"csr"` + PrivateKey string `json:"private_key" structs:"private_key" mapstructure:"private_key"` +} + +// ParsedCSRBundle contains a key type, a DER-encoded private key, +// and a DER-encoded certificate request +type ParsedCSRBundle struct { + PrivateKeyType PrivateKeyType + PrivateKeyBytes []byte + PrivateKey crypto.Signer + CSRBytes []byte + CSR *x509.CertificateRequest +} + +type KeyBundle struct { + PrivateKeyType PrivateKeyType + PrivateKeyBytes []byte + PrivateKey crypto.Signer +} + +func GetPrivateKeyTypeFromSigner(signer crypto.Signer) PrivateKeyType { + // We look at the public key types to work-around limitations/typing of managed keys. 
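+	// (The signer for a managed key may be an opaque wrapper, but its Public()
+	// method still exposes the concrete key type, which is what the switch
+	// below inspects.)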
+ switch signer.Public().(type) { + case *rsa.PublicKey: + return RSAPrivateKey + case *ecdsa.PublicKey: + return ECPrivateKey + case ed25519.PublicKey: + return Ed25519PrivateKey + } + return UnknownPrivateKey +} + +// ToPEMBundle converts a string-based certificate bundle +// to a PEM-based string certificate bundle in trust path +// order, leaf certificate first +func (c *CertBundle) ToPEMBundle() string { + var result []string + + if len(c.PrivateKey) > 0 { + result = append(result, c.PrivateKey) + } + if len(c.Certificate) > 0 { + result = append(result, c.Certificate) + } + if len(c.CAChain) > 0 { + result = append(result, c.CAChain...) + } + + return strings.Join(result, "\n") +} + +// ToParsedCertBundle converts a string-based certificate bundle +// to a byte-based raw certificate bundle +func (c *CertBundle) ToParsedCertBundle() (*ParsedCertBundle, error) { + return c.ToParsedCertBundleWithExtractor(extractAndSetPrivateKey) +} + +// PrivateKeyExtractor extract out a private key from the passed in +// CertBundle and set the appropriate bits within the ParsedCertBundle. +type PrivateKeyExtractor func(c *CertBundle, parsedBundle *ParsedCertBundle) error + +func (c *CertBundle) ToParsedCertBundleWithExtractor(privateKeyExtractor PrivateKeyExtractor) (*ParsedCertBundle, error) { + var err error + var pemBlock *pem.Block + result := &ParsedCertBundle{} + + err = privateKeyExtractor(c, result) + if err != nil { + return nil, err + } + + if len(c.Certificate) > 0 { + pemBlock, _ = pem.Decode([]byte(c.Certificate)) + if pemBlock == nil { + return nil, errutil.UserError{Err: "Error decoding certificate from cert bundle"} + } + result.CertificateBytes = pemBlock.Bytes + result.Certificate, err = x509.ParseCertificate(result.CertificateBytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle: %v", err)} + } + } + switch { + case len(c.CAChain) > 0: + for _, cert := range c.CAChain { + pemBlock, _ := pem.Decode([]byte(cert)) + if pemBlock == nil { + return nil, errutil.UserError{Err: "Error decoding certificate from cert bundle"} + } + + parsedCert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle via CA chain: %v", err)} + } + + certBlock := &CertBlock{ + Bytes: pemBlock.Bytes, + Certificate: parsedCert, + } + result.CAChain = append(result.CAChain, certBlock) + } + + // For backwards compatibility + case len(c.IssuingCA) > 0: + pemBlock, _ = pem.Decode([]byte(c.IssuingCA)) + if pemBlock == nil { + return nil, errutil.UserError{Err: "Error decoding ca certificate from cert bundle"} + } + + parsedCert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle via issuing CA: %v", err)} + } + + certBlock := &CertBlock{ + Bytes: pemBlock.Bytes, + Certificate: parsedCert, + } + result.CAChain = append(result.CAChain, certBlock) + } + + // Populate if it isn't there already + if len(c.SerialNumber) == 0 && len(c.Certificate) > 0 { + c.SerialNumber = GetHexFormatted(result.Certificate.SerialNumber.Bytes(), ":") + } + + return result, nil +} + +func extractAndSetPrivateKey(c *CertBundle, parsedBundle *ParsedCertBundle) error { + if len(c.PrivateKey) == 0 { + return nil + } + + pemBlock, _ := pem.Decode([]byte(c.PrivateKey)) + if pemBlock == nil { + return errutil.UserError{Err: "Error 
decoding private key from cert bundle"} + } + + parsedBundle.PrivateKeyBytes = pemBlock.Bytes + parsedBundle.PrivateKeyFormat = BlockType(strings.TrimSpace(pemBlock.Type)) + + switch parsedBundle.PrivateKeyFormat { + case ECBlock: + parsedBundle.PrivateKeyType, c.PrivateKeyType = ECPrivateKey, ECPrivateKey + case PKCS1Block: + c.PrivateKeyType, parsedBundle.PrivateKeyType = RSAPrivateKey, RSAPrivateKey + case PKCS8Block: + t, err := getPKCS8Type(pemBlock.Bytes) + if err != nil { + return errutil.UserError{Err: fmt.Sprintf("Error getting key type from pkcs#8: %v", err)} + } + parsedBundle.PrivateKeyType = t + switch t { + case ECPrivateKey: + c.PrivateKeyType = ECPrivateKey + case RSAPrivateKey: + c.PrivateKeyType = RSAPrivateKey + case Ed25519PrivateKey: + c.PrivateKeyType = Ed25519PrivateKey + case ManagedPrivateKey: + c.PrivateKeyType = ManagedPrivateKey + } + default: + return errutil.UserError{Err: fmt.Sprintf("Unsupported key block type: %s", pemBlock.Type)} + } + + var err error + parsedBundle.PrivateKey, err = parsedBundle.getSigner() + if err != nil { + return errutil.UserError{Err: fmt.Sprintf("Error getting signer: %s", err)} + } + return nil +} + +// ToCertBundle converts a byte-based raw DER certificate bundle +// to a PEM-based string certificate bundle +func (p *ParsedCertBundle) ToCertBundle() (*CertBundle, error) { + result := &CertBundle{} + block := pem.Block{ + Type: "CERTIFICATE", + } + + if p.Certificate != nil { + result.SerialNumber = strings.TrimSpace(GetHexFormatted(p.Certificate.SerialNumber.Bytes(), ":")) + } + + if p.CertificateBytes != nil && len(p.CertificateBytes) > 0 { + block.Bytes = p.CertificateBytes + result.Certificate = strings.TrimSpace(string(pem.EncodeToMemory(&block))) + } + + for _, caCert := range p.CAChain { + block.Bytes = caCert.Bytes + certificate := strings.TrimSpace(string(pem.EncodeToMemory(&block))) + + result.CAChain = append(result.CAChain, certificate) + } + + if p.PrivateKeyBytes != nil && len(p.PrivateKeyBytes) > 0 { + block.Type = string(p.PrivateKeyFormat) + block.Bytes = p.PrivateKeyBytes + result.PrivateKeyType = p.PrivateKeyType + + // Handle bundle not parsed by us + if block.Type == "" { + switch p.PrivateKeyType { + case ECPrivateKey: + block.Type = string(ECBlock) + case RSAPrivateKey: + block.Type = string(PKCS1Block) + case Ed25519PrivateKey: + block.Type = string(PKCS8Block) + } + } + + result.PrivateKey = strings.TrimSpace(string(pem.EncodeToMemory(&block))) + } + + return result, nil +} + +// Verify checks if the parsed bundle is valid. It validates the public +// key of the certificate to the private key and checks the certificate trust +// chain for path issues. 
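+//
+// A typical call site (illustrative only):
+//
+//	parsed, err := bundle.ToParsedCertBundle()
+//	if err != nil {
+//		// handle error
+//	}
+//	if err := parsed.Verify(); err != nil {
+//		// key/certificate mismatch or a broken trust chain
+//	}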
+func (p *ParsedCertBundle) Verify() error { + // If private key exists, check if it matches the public key of cert + if p.PrivateKey != nil && p.Certificate != nil { + equal, err := ComparePublicKeys(p.Certificate.PublicKey, p.PrivateKey.Public()) + if err != nil { + return errwrap.Wrapf("could not compare public and private keys: {{err}}", err) + } + if !equal { + return fmt.Errorf("public key of certificate does not match private key") + } + } + + certPath := p.GetCertificatePath() + if len(certPath) > 1 { + for i, caCert := range certPath[1:] { + if !caCert.Certificate.IsCA { + return fmt.Errorf("certificate %d of certificate chain is not a certificate authority", i+1) + } + if !bytes.Equal(certPath[i].Certificate.AuthorityKeyId, caCert.Certificate.SubjectKeyId) { + return fmt.Errorf("certificate %d of certificate chain ca trust path is incorrect (%q/%q) (%X/%X)", + i+1, + certPath[i].Certificate.Subject.CommonName, caCert.Certificate.Subject.CommonName, + certPath[i].Certificate.AuthorityKeyId, caCert.Certificate.SubjectKeyId) + } + } + } + + return nil +} + +// GetCertificatePath returns a slice of certificates making up a path, pulled +// from the parsed cert bundle +func (p *ParsedCertBundle) GetCertificatePath() []*CertBlock { + var certPath []*CertBlock + + certPath = append(certPath, &CertBlock{ + Certificate: p.Certificate, + Bytes: p.CertificateBytes, + }) + + if len(p.CAChain) > 0 { + // Root CA puts itself in the chain + if p.CAChain[0].Certificate.SerialNumber != p.Certificate.SerialNumber { + certPath = append(certPath, p.CAChain...) + } + } + + return certPath +} + +// GetSigner returns a crypto.Signer corresponding to the private key +// contained in this ParsedCertBundle. The Signer contains a Public() function +// for getting the corresponding public. 
The Signer can also be +// type-converted to private keys +func (p *ParsedCertBundle) getSigner() (crypto.Signer, error) { + var signer crypto.Signer + var err error + + if p.PrivateKeyBytes == nil || len(p.PrivateKeyBytes) == 0 { + return nil, errutil.UserError{Err: "Given parsed cert bundle does not have private key information"} + } + + switch p.PrivateKeyFormat { + case ECBlock: + signer, err = x509.ParseECPrivateKey(p.PrivateKeyBytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private EC key: %s", err)} + } + + case PKCS1Block: + signer, err = x509.ParsePKCS1PrivateKey(p.PrivateKeyBytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private RSA key: %s", err)} + } + + case PKCS8Block: + if k, err := x509.ParsePKCS8PrivateKey(p.PrivateKeyBytes); err == nil { + switch k := k.(type) { + case *rsa.PrivateKey, *ecdsa.PrivateKey, ed25519.PrivateKey: + return k.(crypto.Signer), nil + default: + return nil, errutil.UserError{Err: "Found unknown private key type in pkcs#8 wrapping"} + } + } + return nil, errutil.UserError{Err: fmt.Sprintf("Failed to parse pkcs#8 key: %v", err)} + default: + return nil, errutil.UserError{Err: "Unable to determine type of private key; only RSA and EC are supported"} + } + return signer, nil +} + +// SetParsedPrivateKey sets the private key parameters on the bundle +func (p *ParsedCertBundle) SetParsedPrivateKey(privateKey crypto.Signer, privateKeyType PrivateKeyType, privateKeyBytes []byte) { + p.PrivateKey = privateKey + p.PrivateKeyType = privateKeyType + p.PrivateKeyBytes = privateKeyBytes +} + +func getPKCS8Type(bs []byte) (PrivateKeyType, error) { + k, err := x509.ParsePKCS8PrivateKey(bs) + if err != nil { + return UnknownPrivateKey, errutil.UserError{Err: fmt.Sprintf("Failed to parse pkcs#8 key: %v", err)} + } + + switch k.(type) { + case *ecdsa.PrivateKey: + return ECPrivateKey, nil + case *rsa.PrivateKey: + return RSAPrivateKey, nil + case ed25519.PrivateKey: + return Ed25519PrivateKey, nil + default: + return UnknownPrivateKey, errutil.UserError{Err: "Found unknown private key type in pkcs#8 wrapping"} + } +} + +// ToParsedCSRBundle converts a string-based CSR bundle +// to a byte-based raw CSR bundle +func (c *CSRBundle) ToParsedCSRBundle() (*ParsedCSRBundle, error) { + result := &ParsedCSRBundle{} + var err error + var pemBlock *pem.Block + + if len(c.PrivateKey) > 0 { + pemBlock, _ = pem.Decode([]byte(c.PrivateKey)) + if pemBlock == nil { + return nil, errutil.UserError{Err: "Error decoding private key from cert bundle"} + } + result.PrivateKeyBytes = pemBlock.Bytes + + switch BlockType(pemBlock.Type) { + case ECBlock: + result.PrivateKeyType = ECPrivateKey + case PKCS1Block: + result.PrivateKeyType = RSAPrivateKey + default: + // Try to figure it out and correct + if _, err := x509.ParseECPrivateKey(pemBlock.Bytes); err == nil { + result.PrivateKeyType = ECPrivateKey + c.PrivateKeyType = "ec" + } else if _, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes); err == nil { + result.PrivateKeyType = RSAPrivateKey + c.PrivateKeyType = "rsa" + } else if _, err := x509.ParsePKCS8PrivateKey(pemBlock.Bytes); err == nil { + result.PrivateKeyType = Ed25519PrivateKey + c.PrivateKeyType = "ed25519" + } else { + return nil, errutil.UserError{Err: fmt.Sprintf("Unknown private key type in bundle: %s", c.PrivateKeyType)} + } + } + + result.PrivateKey, err = result.getSigner() + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Error getting signer: %s", err)} + } 
+	}
+
+	if len(c.CSR) > 0 {
+		pemBlock, _ = pem.Decode([]byte(c.CSR))
+		if pemBlock == nil {
+			return nil, errutil.UserError{Err: "Error decoding CSR from cert bundle"}
+		}
+		result.CSRBytes = pemBlock.Bytes
+		result.CSR, err = x509.ParseCertificateRequest(result.CSRBytes)
+		if err != nil {
+			return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle via CSR: %v", err)}
+		}
+	}
+
+	return result, nil
+}
+
+// ToCSRBundle converts a byte-based raw DER CSR bundle
+// to a PEM-based string CSR bundle
+func (p *ParsedCSRBundle) ToCSRBundle() (*CSRBundle, error) {
+	result := &CSRBundle{}
+	block := pem.Block{
+		Type: "CERTIFICATE REQUEST",
+	}
+
+	if p.CSRBytes != nil && len(p.CSRBytes) > 0 {
+		block.Bytes = p.CSRBytes
+		result.CSR = strings.TrimSpace(string(pem.EncodeToMemory(&block)))
+	}
+
+	if p.PrivateKeyBytes != nil && len(p.PrivateKeyBytes) > 0 {
+		block.Bytes = p.PrivateKeyBytes
+		switch p.PrivateKeyType {
+		case RSAPrivateKey:
+			result.PrivateKeyType = "rsa"
+			block.Type = "RSA PRIVATE KEY"
+		case ECPrivateKey:
+			result.PrivateKeyType = "ec"
+			block.Type = "EC PRIVATE KEY"
+		case Ed25519PrivateKey:
+			result.PrivateKeyType = "ed25519"
+			block.Type = "PRIVATE KEY"
+		case ManagedPrivateKey:
+			result.PrivateKeyType = ManagedPrivateKey
+			block.Type = "PRIVATE KEY"
+		default:
+			return nil, errutil.InternalError{Err: "Could not determine private key type when creating block"}
+		}
+		result.PrivateKey = strings.TrimSpace(string(pem.EncodeToMemory(&block)))
+	}
+
+	return result, nil
+}
+
+// getSigner returns a crypto.Signer corresponding to the private key
+// contained in this ParsedCSRBundle. The Signer contains a Public() function
+// for getting the corresponding public key. The Signer can also be
+// type-asserted to the concrete private key type
+func (p *ParsedCSRBundle) getSigner() (crypto.Signer, error) {
+	var signer crypto.Signer
+	var err error
+
+	if p.PrivateKeyBytes == nil || len(p.PrivateKeyBytes) == 0 {
+		return nil, errutil.UserError{Err: "Given parsed cert bundle does not have private key information"}
+	}
+
+	switch p.PrivateKeyType {
+	case ECPrivateKey:
+		signer, err = x509.ParseECPrivateKey(p.PrivateKeyBytes)
+		if err != nil {
+			return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private EC key: %s", err)}
+		}
+
+	case RSAPrivateKey:
+		signer, err = x509.ParsePKCS1PrivateKey(p.PrivateKeyBytes)
+		if err != nil {
+			return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private RSA key: %s", err)}
+		}
+
+	case Ed25519PrivateKey:
+		// Check the parse error before asserting the key type, so that a
+		// malformed key cannot trigger a nil-value type assertion.
+		signerd, err := x509.ParsePKCS8PrivateKey(p.PrivateKeyBytes)
+		if err != nil {
+			return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private Ed25519 key: %s", err)}
+		}
+		signer = signerd.(ed25519.PrivateKey)
+
+	default:
+		return nil, errutil.UserError{Err: "Unable to determine type of private key; only RSA, Ed25519 and EC are supported"}
+	}
+	return signer, nil
+}
+
+// SetParsedPrivateKey sets the private key parameters on the bundle
+func (p *ParsedCSRBundle) SetParsedPrivateKey(privateKey crypto.Signer, privateKeyType PrivateKeyType, privateKeyBytes []byte) {
+	p.PrivateKey = privateKey
+	p.PrivateKeyType = privateKeyType
+	p.PrivateKeyBytes = privateKeyBytes
+}
+
+// GetTLSConfig returns a TLS config generally suitable for client
+// authentication.
The returned TLS config can be modified slightly +// to be made suitable for a server requiring client authentication; +// specifically, you should set the value of ClientAuth in the returned +// config to match your needs. +func (p *ParsedCertBundle) GetTLSConfig(usage TLSUsage) (*tls.Config, error) { + tlsCert := tls.Certificate{ + Certificate: [][]byte{}, + } + + tlsConfig := &tls.Config{ + MinVersion: tls.VersionTLS12, + } + + if p.Certificate != nil { + tlsCert.Leaf = p.Certificate + } + + if p.PrivateKey != nil { + tlsCert.PrivateKey = p.PrivateKey + } + + if p.CertificateBytes != nil && len(p.CertificateBytes) > 0 { + tlsCert.Certificate = append(tlsCert.Certificate, p.CertificateBytes) + } + + if len(p.CAChain) > 0 { + for _, cert := range p.CAChain { + tlsCert.Certificate = append(tlsCert.Certificate, cert.Bytes) + } + + // Technically we only need one cert, but this doesn't duplicate code + certBundle, err := p.ToCertBundle() + if err != nil { + return nil, errwrap.Wrapf("error converting parsed bundle to string bundle when getting TLS config: {{err}}", err) + } + + caPool := x509.NewCertPool() + ok := caPool.AppendCertsFromPEM([]byte(certBundle.CAChain[0])) + if !ok { + return nil, fmt.Errorf("could not append CA certificate") + } + + if usage&TLSServer > 0 { + tlsConfig.ClientCAs = caPool + tlsConfig.ClientAuth = tls.VerifyClientCertIfGiven + } + if usage&TLSClient > 0 { + tlsConfig.RootCAs = caPool + } + } + + if tlsCert.Certificate != nil && len(tlsCert.Certificate) > 0 { + tlsConfig.Certificates = []tls.Certificate{tlsCert} + } + + return tlsConfig, nil +} + +// IssueData is a structure that is suitable for marshaling into a request; +// either via JSON, or into a map[string]interface{} via the structs package +type IssueData struct { + TTL string `json:"ttl" structs:"ttl" mapstructure:"ttl"` + CommonName string `json:"common_name" structs:"common_name" mapstructure:"common_name"` + OU string `json:"ou" structs:"ou" mapstructure:"ou"` + AltNames string `json:"alt_names" structs:"alt_names" mapstructure:"alt_names"` + IPSANs string `json:"ip_sans" structs:"ip_sans" mapstructure:"ip_sans"` + CSR string `json:"csr" structs:"csr" mapstructure:"csr"` + OtherSANs string `json:"other_sans" structs:"other_sans" mapstructure:"other_sans"` +} + +type URLEntries struct { + IssuingCertificates []string `json:"issuing_certificates" structs:"issuing_certificates" mapstructure:"issuing_certificates"` + CRLDistributionPoints []string `json:"crl_distribution_points" structs:"crl_distribution_points" mapstructure:"crl_distribution_points"` + OCSPServers []string `json:"ocsp_servers" structs:"ocsp_servers" mapstructure:"ocsp_servers"` +} + +type NotAfterBehavior int + +const ( + ErrNotAfterBehavior NotAfterBehavior = iota + TruncateNotAfterBehavior + PermitNotAfterBehavior +) + +var notAfterBehaviorNames = map[NotAfterBehavior]string{ + ErrNotAfterBehavior: "err", + TruncateNotAfterBehavior: "truncate", + PermitNotAfterBehavior: "permit", +} + +func (n NotAfterBehavior) String() string { + if name, ok := notAfterBehaviorNames[n]; ok && len(name) > 0 { + return name + } + + return "unknown" +} + +type CAInfoBundle struct { + ParsedCertBundle + URLs *URLEntries + LeafNotAfterBehavior NotAfterBehavior + RevocationSigAlg x509.SignatureAlgorithm +} + +func (b *CAInfoBundle) GetCAChain() []*CertBlock { + chain := []*CertBlock{} + + // Include issuing CA in Chain, not including Root Authority + if (len(b.Certificate.AuthorityKeyId) > 0 && + !bytes.Equal(b.Certificate.AuthorityKeyId, 
b.Certificate.SubjectKeyId)) || + (len(b.Certificate.AuthorityKeyId) == 0 && + !bytes.Equal(b.Certificate.RawIssuer, b.Certificate.RawSubject)) { + + chain = b.GetFullChain() + } + + return chain +} + +func (b *CAInfoBundle) GetFullChain() []*CertBlock { + var chain []*CertBlock + + // Some bundles already include the root included in the chain, + // so don't include it twice. + if len(b.CAChain) == 0 || !bytes.Equal(b.CAChain[0].Bytes, b.CertificateBytes) { + chain = append(chain, &CertBlock{ + Certificate: b.Certificate, + Bytes: b.CertificateBytes, + }) + } + + if len(b.CAChain) > 0 { + chain = append(chain, b.CAChain...) + } + + return chain +} + +type CertExtKeyUsage int + +const ( + AnyExtKeyUsage CertExtKeyUsage = 1 << iota + ServerAuthExtKeyUsage + ClientAuthExtKeyUsage + CodeSigningExtKeyUsage + EmailProtectionExtKeyUsage + IpsecEndSystemExtKeyUsage + IpsecTunnelExtKeyUsage + IpsecUserExtKeyUsage + TimeStampingExtKeyUsage + OcspSigningExtKeyUsage + MicrosoftServerGatedCryptoExtKeyUsage + NetscapeServerGatedCryptoExtKeyUsage + MicrosoftCommercialCodeSigningExtKeyUsage + MicrosoftKernelCodeSigningExtKeyUsage +) + +type CreationParameters struct { + Subject pkix.Name + DNSNames []string + EmailAddresses []string + IPAddresses []net.IP + URIs []*url.URL + OtherSANs map[string][]string + IsCA bool + KeyType string + KeyBits int + NotAfter time.Time + KeyUsage x509.KeyUsage + ExtKeyUsage CertExtKeyUsage + ExtKeyUsageOIDs []string + PolicyIdentifiers []string + BasicConstraintsValidForNonCA bool + SignatureBits int + UsePSS bool + ForceAppendCaChain bool + + // Only used when signing a CA cert + UseCSRValues bool + PermittedDNSDomains []string + + // URLs to encode into the certificate + URLs *URLEntries + + // The maximum path length to encode + MaxPathLength int + + // The duration the certificate will use NotBefore + NotBeforeDuration time.Duration + + // The explicit SKID to use; especially useful for cross-signing. 
+ SKID []byte +} + +type CreationBundle struct { + Params *CreationParameters + SigningBundle *CAInfoBundle + CSR *x509.CertificateRequest +} + +// addKeyUsages adds appropriate key usages to the template given the creation +// information +func AddKeyUsages(data *CreationBundle, certTemplate *x509.Certificate) { + if data.Params.IsCA { + certTemplate.KeyUsage = x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign) + return + } + + certTemplate.KeyUsage = data.Params.KeyUsage + + if data.Params.ExtKeyUsage&AnyExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageAny) + } + + if data.Params.ExtKeyUsage&ServerAuthExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageServerAuth) + } + + if data.Params.ExtKeyUsage&ClientAuthExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageClientAuth) + } + + if data.Params.ExtKeyUsage&CodeSigningExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageCodeSigning) + } + + if data.Params.ExtKeyUsage&EmailProtectionExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageEmailProtection) + } + + if data.Params.ExtKeyUsage&IpsecEndSystemExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECEndSystem) + } + + if data.Params.ExtKeyUsage&IpsecTunnelExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECTunnel) + } + + if data.Params.ExtKeyUsage&IpsecUserExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECUser) + } + + if data.Params.ExtKeyUsage&TimeStampingExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageTimeStamping) + } + + if data.Params.ExtKeyUsage&OcspSigningExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageOCSPSigning) + } + + if data.Params.ExtKeyUsage&MicrosoftServerGatedCryptoExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftServerGatedCrypto) + } + + if data.Params.ExtKeyUsage&NetscapeServerGatedCryptoExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageNetscapeServerGatedCrypto) + } + + if data.Params.ExtKeyUsage&MicrosoftCommercialCodeSigningExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftCommercialCodeSigning) + } + + if data.Params.ExtKeyUsage&MicrosoftKernelCodeSigningExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftKernelCodeSigning) + } +} + +// SetParsedPrivateKey sets the private key parameters on the bundle +func (p *KeyBundle) SetParsedPrivateKey(privateKey crypto.Signer, privateKeyType PrivateKeyType, privateKeyBytes []byte) { + p.PrivateKey = privateKey + p.PrivateKeyType = privateKeyType + p.PrivateKeyBytes = privateKeyBytes +} + +func (p *KeyBundle) ToPrivateKeyPemString() (string, error) { + block := pem.Block{} + + if p.PrivateKeyBytes != nil && len(p.PrivateKeyBytes) > 0 { + block.Bytes = p.PrivateKeyBytes + switch p.PrivateKeyType { + case RSAPrivateKey: + block.Type = "RSA PRIVATE KEY" + case ECPrivateKey: + block.Type = "EC PRIVATE KEY" + default: + block.Type = "PRIVATE KEY" + } + privateKeyPemString := strings.TrimSpace(string(pem.EncodeToMemory(&block))) + return 
privateKeyPemString, nil
+	}
+
+	return "", errutil.InternalError{Err: "No Private Key Bytes to Wrap"}
+}
+
+// PolicyIdentifierWithQualifierEntry is the structure used for internal storage
+type PolicyIdentifierWithQualifierEntry struct {
+	PolicyIdentifierOid string `json:"oid" mapstructure:"oid"`
+	CPS                 string `json:"cps,omitempty" mapstructure:"cps"`
+	Notice              string `json:"notice,omitempty" mapstructure:"notice"`
+}
+
+// GetPolicyIdentifierFromString parses out the internal structure of a Policy Identifier
+func GetPolicyIdentifierFromString(policyIdentifier string) (*PolicyIdentifierWithQualifierEntry, error) {
+	if policyIdentifier == "" {
+		return nil, nil
+	}
+	entry := &PolicyIdentifierWithQualifierEntry{}
+	// Either an OID or a JSON entry: first check OID:
+	_, err := StringToOid(policyIdentifier)
+	if err == nil {
+		entry.PolicyIdentifierOid = policyIdentifier
+		return entry, nil
+	}
+	// Now check if it is a JSON entry
+	jsonErr := json.Unmarshal([]byte(policyIdentifier), &entry)
+	if jsonErr != nil { // Neither, if we got here
+		return entry, fmt.Errorf("policy identifier %q is neither a valid OID (%s) nor a valid JSON policy identifier (%s)", policyIdentifier, err.Error(), jsonErr.Error())
+	}
+	return entry, nil
+}
+
+// Policy Identifier with Qualifier structure for ASN.1 marshalling:
+
+var policyInformationOid = asn1.ObjectIdentifier{2, 5, 29, 32}
+
+type policyInformation struct {
+	PolicyIdentifier asn1.ObjectIdentifier
+	Qualifiers       []interface{} `asn1:"tag:optional,omitempty"`
+}
+
+var cpsPolicyQualifierID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 1}
+
+type cpsUrlPolicyQualifier struct {
+	PolicyQualifierID asn1.ObjectIdentifier
+	Qualifier         string `asn1:"tag:optional,ia5"`
+}
+
+var userNoticePolicyQualifierID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 2}
+
+type userNoticePolicyQualifier struct {
+	PolicyQualifierID asn1.ObjectIdentifier
+	Qualifier         userNotice
+}
+
+type userNotice struct {
+	ExplicitText string `asn1:"tag:optional,utf8"`
+}
+
+func createPolicyIdentifierWithQualifier(entry PolicyIdentifierWithQualifierEntry) (*policyInformation, error) {
+	// Each Policy is Identified by a Unique ID, as designated here:
+	policyOid, err := StringToOid(entry.PolicyIdentifierOid)
+	if err != nil {
+		return nil, err
+	}
+	pi := policyInformation{
+		PolicyIdentifier: policyOid,
+	}
+	if entry.CPS != "" {
+		qualifier := cpsUrlPolicyQualifier{
+			PolicyQualifierID: cpsPolicyQualifierID,
+			Qualifier:         entry.CPS,
+		}
+		pi.Qualifiers = append(pi.Qualifiers, qualifier)
+	}
+	if entry.Notice != "" {
+		qualifier := userNoticePolicyQualifier{
+			PolicyQualifierID: userNoticePolicyQualifierID,
+			Qualifier: userNotice{
+				ExplicitText: entry.Notice,
+			},
+		}
+		pi.Qualifiers = append(pi.Qualifiers, qualifier)
+	}
+	return &pi, nil
+}
+
+// CreatePolicyInformationExtensionFromStorageStrings parses the stored policyIdentifiers, which might be JSON Policy
+// Identifier with Qualifier Entries or String OIDs, and returns an extension if everything parsed correctly, and an
+// error if construction fails
+func CreatePolicyInformationExtensionFromStorageStrings(policyIdentifiers []string) (*pkix.Extension, error) {
+	var policyInformationList []policyInformation
+	for _, policyIdentifierStr := range policyIdentifiers {
+		policyIdentifierEntry, err := GetPolicyIdentifierFromString(policyIdentifierStr)
+		if err != nil {
+			return nil, err
+		}
+		if policyIdentifierEntry != nil { // Okay to skip empty entries if there is no error
+			policyInformationStruct, err := createPolicyIdentifierWithQualifier(*policyIdentifierEntry)
+			if err != nil {
+				return nil, err
+			}
+			policyInformationList = append(policyInformationList, *policyInformationStruct)
+		}
+	}
+	asn1Bytes, err := asn1.Marshal(policyInformationList)
+	if err != nil {
+		return nil, err
+	}
+	return &pkix.Extension{
+		Id:       policyInformationOid,
+		Critical: false,
+		Value:    asn1Bytes,
+	}, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/compressutil/compress.go b/vendor/github.com/hashicorp/vault/sdk/helper/compressutil/compress.go
new file mode 100644
index 00000000000..924f82a2a1b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/helper/compressutil/compress.go
@@ -0,0 +1,222 @@
+package compressutil
+
+import (
+	"bytes"
+	"compress/gzip"
+	"compress/lzw"
+	"fmt"
+	"io"
+
+	"github.com/golang/snappy"
+	"github.com/hashicorp/errwrap"
+	"github.com/pierrec/lz4"
+)
+
+const (
+	// A byte value used as a canary prefix for the compressed information
+	// which is used to distinguish if a JSON input is compressed or not.
+	// The value of this constant should not be the first character of any
+	// valid JSON string.
+
+	CompressionTypeGzip        = "gzip"
+	CompressionCanaryGzip byte = 'G'
+
+	CompressionTypeLZW        = "lzw"
+	CompressionCanaryLZW byte = 'L'
+
+	CompressionTypeSnappy        = "snappy"
+	CompressionCanarySnappy byte = 'S'
+
+	CompressionTypeLZ4        = "lz4"
+	CompressionCanaryLZ4 byte = '4'
+)
+
+// CompressUtilReadCloser embeds the underlying reader, which implements the
+// io.Reader interface. The decompress procedure in this utility expects an
+// io.ReadCloser. This type implements the io.Closer interface to retain the
+// generic way of decompression.
+type CompressUtilReadCloser struct {
+	io.Reader
+}
+
+// Close is a noop method implemented only to satisfy the io.Closer interface
+func (c *CompressUtilReadCloser) Close() error {
+	return nil
+}
+
+// CompressionConfig is used to select a compression type to be performed by
+// Compress and Decompress utilities.
+// Supported types are:
+// * CompressionTypeLZW
+// * CompressionTypeGzip
+// * CompressionTypeSnappy
+// * CompressionTypeLZ4
+//
+// When using CompressionTypeGzip, the compression levels can also be chosen:
+// * gzip.DefaultCompression
+// * gzip.BestSpeed
+// * gzip.BestCompression
+type CompressionConfig struct {
+	// Type of the compression algorithm to be used
+	Type string
+
+	// When using Gzip format, the compression level to employ
+	GzipCompressionLevel int
+}
+
+// Compress places the canary byte in a buffer and uses the same buffer to fill
+// in the compressed information of the given input. The configuration supports
+// four types of compression: LZW, Gzip, Snappy, and LZ4. When using the Gzip
+// format, if GzipCompressionLevel is not specified, gzip.DefaultCompression
+// will be assumed.
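+//
+// A minimal usage sketch (illustrative only; error handling elided):
+//
+//	compressed, err := compressutil.Compress(input, &compressutil.CompressionConfig{
+//		Type:                 compressutil.CompressionTypeGzip,
+//		GzipCompressionLevel: gzip.BestSpeed,
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	output, notCompressed, err := compressutil.Decompress(compressed)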
+func Compress(data []byte, config *CompressionConfig) ([]byte, error) {
+	var buf bytes.Buffer
+	var writer io.WriteCloser
+	var err error
+
+	if config == nil {
+		return nil, fmt.Errorf("config is nil")
+	}
+
+	// Write the canary into the buffer and create writer to compress the
+	// input data based on the configured type
+	switch config.Type {
+	case CompressionTypeLZW:
+		buf.Write([]byte{CompressionCanaryLZW})
+		writer = lzw.NewWriter(&buf, lzw.LSB, 8)
+
+	case CompressionTypeGzip:
+		buf.Write([]byte{CompressionCanaryGzip})
+
+		switch {
+		case config.GzipCompressionLevel == gzip.BestCompression,
+			config.GzipCompressionLevel == gzip.BestSpeed,
+			config.GzipCompressionLevel == gzip.DefaultCompression:
+			// These are valid compression levels
+		default:
+			// If compression level is set to NoCompression or to
+			// any invalid value, fall back to gzip.DefaultCompression
+			config.GzipCompressionLevel = gzip.DefaultCompression
+		}
+		writer, err = gzip.NewWriterLevel(&buf, config.GzipCompressionLevel)
+
+	case CompressionTypeSnappy:
+		buf.Write([]byte{CompressionCanarySnappy})
+		writer = snappy.NewBufferedWriter(&buf)
+
+	case CompressionTypeLZ4:
+		buf.Write([]byte{CompressionCanaryLZ4})
+		writer = lz4.NewWriter(&buf)
+
+	default:
+		return nil, fmt.Errorf("unsupported compression type")
+	}
+
+	if err != nil {
+		return nil, errwrap.Wrapf("failed to create a compression writer: {{err}}", err)
+	}
+
+	if writer == nil {
+		return nil, fmt.Errorf("failed to create a compression writer")
+	}
+
+	// Compress the input and place it in the same buffer containing the
+	// canary byte.
+	if _, err = writer.Write(data); err != nil {
+		return nil, errwrap.Wrapf("failed to compress input data: {{err}}", err)
+	}
+
+	// Close the io.WriteCloser
+	if err = writer.Close(); err != nil {
+		return nil, err
+	}
+
+	// Return the compressed bytes with canary byte at the start
+	return buf.Bytes(), nil
+}
+
+// Decompress checks if the first byte in the input matches the canary byte.
+// If the first byte is a canary byte, then the input past the canary byte
+// is decompressed using the method indicated by that canary byte. If the
+// first byte isn't a canary byte, then the returned boolean value indicates
+// that the input was not compressed.
+func Decompress(data []byte) ([]byte, bool, error) {
+	bytes, _, notCompressed, err := DecompressWithCanary(data)
+	return bytes, notCompressed, err
+}
+
+// DecompressWithCanary checks if the first byte in the input matches the canary byte.
+// If the first byte is a canary byte, then the input past the canary byte
+// is decompressed using the method indicated by that canary byte, and the
+// type of compression used is also returned. If the first byte isn't a canary
+// byte, then the returned boolean value indicates that the input was not compressed.
+func DecompressWithCanary(data []byte) ([]byte, string, bool, error) {
+	var err error
+	var reader io.ReadCloser
+	var compressionType string
+	if data == nil || len(data) == 0 {
+		return nil, "", false, fmt.Errorf("'data' being decompressed is empty")
+	}
+
+	canary := data[0]
+	cData := data[1:]
+
+	switch canary {
+	// If the first byte matches the canary byte, remove the canary
+	// byte and try to decompress the data that is after the canary.
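+	// Note that every case below also rejects input that consists of a bare
+	// canary byte with no payload following it.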
+    case CompressionCanaryGzip:
+        if len(data) < 2 {
+            return nil, "", false, fmt.Errorf("invalid 'data' after the canary")
+        }
+        reader, err = gzip.NewReader(bytes.NewReader(cData))
+        compressionType = CompressionTypeGzip
+
+    case CompressionCanaryLZW:
+        if len(data) < 2 {
+            return nil, "", false, fmt.Errorf("invalid 'data' after the canary")
+        }
+        reader = lzw.NewReader(bytes.NewReader(cData), lzw.LSB, 8)
+        compressionType = CompressionTypeLZW
+
+    case CompressionCanarySnappy:
+        if len(data) < 2 {
+            return nil, "", false, fmt.Errorf("invalid 'data' after the canary")
+        }
+        reader = &CompressUtilReadCloser{
+            Reader: snappy.NewReader(bytes.NewReader(cData)),
+        }
+        compressionType = CompressionTypeSnappy
+
+    case CompressionCanaryLZ4:
+        if len(data) < 2 {
+            return nil, "", false, fmt.Errorf("invalid 'data' after the canary")
+        }
+        reader = &CompressUtilReadCloser{
+            Reader: lz4.NewReader(bytes.NewReader(cData)),
+        }
+        compressionType = CompressionTypeLZ4
+
+    default:
+        // If the first byte doesn't match the canary byte, it means
+        // that the content was not compressed at all. Indicate to the
+        // caller that the input was not compressed.
+        return nil, "", true, nil
+    }
+    if err != nil {
+        return nil, "", false, errwrap.Wrapf("failed to create a compression reader: {{err}}", err)
+    }
+    if reader == nil {
+        return nil, "", false, fmt.Errorf("failed to create a compression reader")
+    }
+
+    // Close the io.ReadCloser
+    defer reader.Close()
+
+    // Read all the compressed data into a buffer
+    var buf bytes.Buffer
+    if _, err = io.Copy(&buf, reader); err != nil {
+        return nil, "", false, err
+    }
+
+    return buf.Bytes(), compressionType, false, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/agent.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/agent.go
new file mode 100644
index 00000000000..55be844e14e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/helper/consts/agent.go
@@ -0,0 +1,12 @@
+package consts
+
+// AgentPathCacheClear is the path that the agent will use as its cache-clear
+// endpoint.
+const AgentPathCacheClear = "/agent/v1/cache-clear"
+
+// AgentPathMetrics is the path that the agent will use to expose its internal
+// metrics.
+const AgentPathMetrics = "/agent/v1/metrics"
+
+// AgentPathQuit is the path that the agent will use to trigger stopping it.
+const AgentPathQuit = "/agent/v1/quit"
diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/consts.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/consts.go
new file mode 100644
index 00000000000..c431e2e5941
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/helper/consts/consts.go
@@ -0,0 +1,37 @@
+package consts
+
+const (
+    // ExpirationRestoreWorkerCount specifies the number of workers to use while
+    // restoring leases into the expiration manager
+    ExpirationRestoreWorkerCount = 64
+
+    // NamespaceHeaderName is the header set to specify which namespace the
+    // request is intended for.
+    NamespaceHeaderName = "X-Vault-Namespace"
+
+    // AuthHeaderName is the name of the header containing the token.
+    AuthHeaderName = "X-Vault-Token"
+
+    // RequestHeaderName is the name of the header used by the Agent for
+    // SSRF protection.
+    RequestHeaderName = "X-Vault-Request"
+
+    // PerformanceReplicationALPN is the negotiated protocol used for
+    // performance replication.
+    PerformanceReplicationALPN = "replication_v1"
+
+    // DRReplicationALPN is the negotiated protocol used for dr replication.
+    DRReplicationALPN = "replication_dr_v1"
+
+    PerfStandbyALPN = "perf_standby_v1"
+
+    RequestForwardingALPN = "req_fw_sb-act_v1"
+
+    RaftStorageALPN = "raft_storage_v1"
+
+    // ReplicationResolverALPN is the negotiated protocol used for
+    // resolving replication addresses
+    ReplicationResolverALPN = "replication_resolver_v1"
+
+    VaultEnableFilePermissionsCheckEnv = "VAULT_ENABLE_FILE_PERMISSIONS_CHECK"
+)
diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/deprecation_status.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/deprecation_status.go
new file mode 100644
index 00000000000..5591924a770
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/helper/consts/deprecation_status.go
@@ -0,0 +1,31 @@
+package consts
+
+const VaultAllowPendingRemovalMountsEnv = "VAULT_ALLOW_PENDING_REMOVAL_MOUNTS"
+
+// DeprecationStatus represents the current deprecation state for builtins
+type DeprecationStatus uint32
+
+// These are the states of deprecation for builtin plugins
+const (
+    Supported = iota
+    Deprecated
+    PendingRemoval
+    Removed
+    Unknown
+)
+
+// String returns the string representation of a builtin deprecation status
+func (s DeprecationStatus) String() string {
+    switch s {
+    case Supported:
+        return "supported"
+    case Deprecated:
+        return "deprecated"
+    case PendingRemoval:
+        return "pending removal"
+    case Removed:
+        return "removed"
+    default:
+        return ""
+    }
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/error.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/error.go
new file mode 100644
index 00000000000..1a9175c6392
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/helper/consts/error.go
@@ -0,0 +1,25 @@
+package consts
+
+import "errors"
+
+var (
+    // ErrSealed is returned if an operation is performed on a sealed barrier.
+    // No operation is expected to succeed before unsealing
+    ErrSealed = errors.New("Vault is sealed")
+
+    // ErrAPILocked is returned if an operation is performed when the API is
+    // locked for the request namespace.
+    ErrAPILocked = errors.New("API access to this namespace has been locked by an administrator")
+
+    // ErrStandby is returned if an operation is performed on a standby Vault.
+    // No operation is expected to succeed until active.
+    ErrStandby = errors.New("Vault is in standby mode")
+
+    // ErrPathContainsParentReferences is returned when a path contains parent
+    // references.
+    ErrPathContainsParentReferences = errors.New("path cannot contain parent references")
+
+    // ErrInvalidWrappingToken is returned when checking for the validity of
+    // a wrapping token that turns out to be invalid.
+    ErrInvalidWrappingToken = errors.New("wrapping token is not valid or does not exist")
+)
diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/plugin_types.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/plugin_types.go
new file mode 100644
index 00000000000..e0a00e4860c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/helper/consts/plugin_types.go
@@ -0,0 +1,59 @@
+package consts
+
+import "fmt"
+
+var PluginTypes = []PluginType{
+    PluginTypeUnknown,
+    PluginTypeCredential,
+    PluginTypeDatabase,
+    PluginTypeSecrets,
+}
+
+type PluginType uint32
+
+// This is a list of PluginTypes used by Vault.
+// If we need to add any in the future, it would
+// be best to add them to the _end_ of the list below
+// because they resolve to incrementing numbers,
+// which may be saved in state somewhere.
Thus if +// the name for one of those numbers changed because +// a value were added to the middle, that could cause +// the wrong plugin types to be read from storage +// for a given underlying number. Example of the problem +// here: https://play.golang.org/p/YAaPw5ww3er +const ( + PluginTypeUnknown PluginType = iota + PluginTypeCredential + PluginTypeDatabase + PluginTypeSecrets +) + +func (p PluginType) String() string { + switch p { + case PluginTypeUnknown: + return "unknown" + case PluginTypeCredential: + return "auth" + case PluginTypeDatabase: + return "database" + case PluginTypeSecrets: + return "secret" + default: + return "unsupported" + } +} + +func ParsePluginType(pluginType string) (PluginType, error) { + switch pluginType { + case "unknown": + return PluginTypeUnknown, nil + case "auth": + return PluginTypeCredential, nil + case "database": + return PluginTypeDatabase, nil + case "secret": + return PluginTypeSecrets, nil + default: + return PluginTypeUnknown, fmt.Errorf("%q is not a supported plugin type", pluginType) + } +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/replication.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/replication.go new file mode 100644 index 00000000000..f72c2f47aee --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/consts/replication.go @@ -0,0 +1,159 @@ +package consts + +const ( + // N.B. This needs to be excluded from replication despite the name; it's + // merely saying that this is cluster information for the replicated + // cluster. + CoreReplicatedClusterPrefix = "core/cluster/replicated/" + CoreReplicatedClusterPrefixDR = "core/cluster/replicated-dr/" + + CoreReplicatedClusterInfoPath = CoreReplicatedClusterPrefix + "info" + CoreReplicatedClusterSecondariesPrefix = CoreReplicatedClusterPrefix + "secondaries/" + CoreReplicatedClusterInfoPathDR = CoreReplicatedClusterPrefixDR + "info" + CoreReplicatedClusterSecondariesPrefixDR = CoreReplicatedClusterPrefixDR + "secondaries/" + + // This is an identifier for the current secondary in the replicated paths + // manager. It should contain a character that is not allowed in secondary + // ids to ensure it doesn't collide. + CurrentReplicatedSecondaryIdentifier = ".current" + CoreFeatureFlagPath = "core/cluster/feature-flags" +) + +type ReplicationState uint32 + +const ( + _ ReplicationState = iota + OldReplicationPrimary + OldReplicationSecondary + OldReplicationBootstrapping + // Don't add anything here. Adding anything to this Old block would cause + // the rest of the values to change below. This was done originally to + // ensure no overlap between old and new values. + + ReplicationUnknown ReplicationState = 0 + ReplicationPerformancePrimary ReplicationState = 1 << iota // Note -- iota is 5 here! 
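+	// (Editorial note, not in the upstream source: iota keeps counting from
+	// the top of this const block, so it is 5 on the ReplicationPerformancePrimary
+	// line above and ReplicationPerformancePrimary == 1<<5 == 32. Each
+	// identifier below implicitly repeats the 1<<iota expression, doubling
+	// the flag value, which is what lets these states combine as a bitmask
+	// through the HasState/AddState helpers further down; the init check
+	// below pins these values.)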
+    ReplicationPerformanceSecondary
+    OldSplitReplicationBootstrapping
+    ReplicationDRPrimary
+    ReplicationDRSecondary
+    ReplicationPerformanceBootstrapping
+    ReplicationDRBootstrapping
+    ReplicationPerformanceDisabled
+    ReplicationDRDisabled
+    ReplicationPerformanceStandby
+)
+
+// We verify that no change to the above values is made
+func init() {
+    if OldReplicationBootstrapping != 3 {
+        panic("Replication Constants have changed")
+    }
+
+    if ReplicationPerformancePrimary != 1<<5 {
+        panic("Replication Constants have changed")
+    }
+}
+
+func (r ReplicationState) string() string {
+    switch r {
+    case ReplicationPerformanceSecondary:
+        return "secondary"
+    case ReplicationPerformancePrimary:
+        return "primary"
+    case ReplicationPerformanceBootstrapping:
+        return "bootstrapping"
+    case ReplicationPerformanceDisabled:
+        return "disabled"
+    case ReplicationDRPrimary:
+        return "primary"
+    case ReplicationDRSecondary:
+        return "secondary"
+    case ReplicationDRBootstrapping:
+        return "bootstrapping"
+    case ReplicationDRDisabled:
+        return "disabled"
+    }
+
+    return "unknown"
+}
+
+func (r ReplicationState) StateStrings() []string {
+    var ret []string
+    if r.HasState(ReplicationPerformanceSecondary) {
+        ret = append(ret, "perf-secondary")
+    }
+    if r.HasState(ReplicationPerformancePrimary) {
+        ret = append(ret, "perf-primary")
+    }
+    if r.HasState(ReplicationPerformanceBootstrapping) {
+        ret = append(ret, "perf-bootstrapping")
+    }
+    if r.HasState(ReplicationPerformanceDisabled) {
+        ret = append(ret, "perf-disabled")
+    }
+    if r.HasState(ReplicationDRPrimary) {
+        ret = append(ret, "dr-primary")
+    }
+    if r.HasState(ReplicationDRSecondary) {
+        ret = append(ret, "dr-secondary")
+    }
+    if r.HasState(ReplicationDRBootstrapping) {
+        ret = append(ret, "dr-bootstrapping")
+    }
+    if r.HasState(ReplicationDRDisabled) {
+        ret = append(ret, "dr-disabled")
+    }
+    if r.HasState(ReplicationPerformanceStandby) {
+        ret = append(ret, "perfstandby")
+    }
+
+    return ret
+}
+
+func (r ReplicationState) GetDRString() string {
+    switch {
+    case r.HasState(ReplicationDRBootstrapping):
+        return ReplicationDRBootstrapping.string()
+    case r.HasState(ReplicationDRPrimary):
+        return ReplicationDRPrimary.string()
+    case r.HasState(ReplicationDRSecondary):
+        return ReplicationDRSecondary.string()
+    case r.HasState(ReplicationDRDisabled):
+        return ReplicationDRDisabled.string()
+    default:
+        return "unknown"
+    }
+}
+
+func (r ReplicationState) GetPerformanceString() string {
+    switch {
+    case r.HasState(ReplicationPerformanceBootstrapping):
+        return ReplicationPerformanceBootstrapping.string()
+    case r.HasState(ReplicationPerformancePrimary):
+        return ReplicationPerformancePrimary.string()
+    case r.HasState(ReplicationPerformanceSecondary):
+        return ReplicationPerformanceSecondary.string()
+    case r.HasState(ReplicationPerformanceDisabled):
+        return ReplicationPerformanceDisabled.string()
+    default:
+        return "unknown"
+    }
+}
+
+func (r ReplicationState) IsPrimaryState() bool {
+    return r.HasState(ReplicationPerformancePrimary | ReplicationDRPrimary)
+}
+
+func (r ReplicationState) HasState(flag ReplicationState) bool { return r&flag != 0 }
+func (r *ReplicationState) AddState(flag ReplicationState) { *r |= flag }
+func (r *ReplicationState) ClearState(flag ReplicationState) { *r &= ^flag }
+func (r *ReplicationState) ToggleState(flag ReplicationState) { *r ^= flag }
+
+type HAState uint32
+
+const (
+    _ HAState = iota
+    Standby
+    PerfStandby
+    Active
+)
diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/token_consts.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/token_consts.go
new file mode 100644
index 00000000000..2b4e0278bf2
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/helper/consts/token_consts.go
@@ -0,0 +1,10 @@
+package consts
+
+const (
+    ServiceTokenPrefix = "hvs."
+    BatchTokenPrefix = "hvb."
+    RecoveryTokenPrefix = "hvr."
+    LegacyServiceTokenPrefix = "s."
+    LegacyBatchTokenPrefix = "b."
+    LegacyRecoveryTokenPrefix = "r."
+)
diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/cryptoutil/cryptoutil.go b/vendor/github.com/hashicorp/vault/sdk/helper/cryptoutil/cryptoutil.go
new file mode 100644
index 00000000000..a37086c645d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/helper/cryptoutil/cryptoutil.go
@@ -0,0 +1,11 @@
+package cryptoutil
+
+import "golang.org/x/crypto/blake2b"
+
+func Blake2b256Hash(key string) []byte {
+    hf, _ := blake2b.New256(nil)
+
+    hf.Write([]byte(key))
+
+    return hf.Sum(nil)
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/errutil/error.go b/vendor/github.com/hashicorp/vault/sdk/helper/errutil/error.go
new file mode 100644
index 00000000000..0b95efb40e3
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/helper/errutil/error.go
@@ -0,0 +1,20 @@
+package errutil
+
+// UserError represents an error generated due to invalid user input
+type UserError struct {
+    Err string
+}
+
+func (e UserError) Error() string {
+    return e.Err
+}
+
+// InternalError represents an error generated internally,
+// presumably not due to invalid user input
+type InternalError struct {
+    Err string
+}
+
+func (e InternalError) Error() string {
+    return e.Err
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/hclutil/hcl.go b/vendor/github.com/hashicorp/vault/sdk/helper/hclutil/hcl.go
new file mode 100644
index 00000000000..0b120367d5a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/helper/hclutil/hcl.go
@@ -0,0 +1,36 @@
+package hclutil
+
+import (
+    "fmt"
+
+    multierror "github.com/hashicorp/go-multierror"
+    "github.com/hashicorp/hcl/hcl/ast"
+)
+
+// CheckHCLKeys checks that the keys in the given AST node are all contained in the list of valid keys provided.
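An illustrative sketch of the function below (editorial, not part of the patch; it assumes the v1 github.com/hashicorp/hcl parser that this helper is written against):

    package main

    import (
        "fmt"

        "github.com/hashicorp/hcl"
        "github.com/hashicorp/vault/sdk/helper/hclutil"
    )

    func main() {
        input := "listener \"tcp\" {\n  address = \"127.0.0.1:8200\"\n}\ntypo = true\n"
        file, err := hcl.Parse(input)
        if err != nil {
            panic(err)
        }
        // Only "listener" is declared valid, so the stray "typo" key is
        // collected into the returned multierror.
        if err := hclutil.CheckHCLKeys(file.Node, []string{"listener"}); err != nil {
            fmt.Println(err) // invalid key "typo" on line 4
        }
    }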
+func CheckHCLKeys(node ast.Node, valid []string) error {
+    var list *ast.ObjectList
+    switch n := node.(type) {
+    case *ast.ObjectList:
+        list = n
+    case *ast.ObjectType:
+        list = n.List
+    default:
+        return fmt.Errorf("cannot check HCL keys of type %T", n)
+    }
+
+    validMap := make(map[string]struct{}, len(valid))
+    for _, v := range valid {
+        validMap[v] = struct{}{}
+    }
+
+    var result error
+    for _, item := range list.Items {
+        key := item.Keys[0].Token.Value().(string)
+        if _, ok := validMap[key]; !ok {
+            result = multierror.Append(result, fmt.Errorf("invalid key %q on line %d", key, item.Assign.Line))
+        }
+    }
+
+    return result
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/jsonutil/json.go b/vendor/github.com/hashicorp/vault/sdk/helper/jsonutil/json.go
new file mode 100644
index 00000000000..c03a4f8c8d1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/helper/jsonutil/json.go
@@ -0,0 +1,100 @@
+package jsonutil
+
+import (
+    "bytes"
+    "compress/gzip"
+    "encoding/json"
+    "fmt"
+    "io"
+
+    "github.com/hashicorp/errwrap"
+    "github.com/hashicorp/vault/sdk/helper/compressutil"
+)
+
+// EncodeJSON encodes/marshals the given object into JSON
+func EncodeJSON(in interface{}) ([]byte, error) {
+    if in == nil {
+        return nil, fmt.Errorf("input for encoding is nil")
+    }
+    var buf bytes.Buffer
+    enc := json.NewEncoder(&buf)
+    if err := enc.Encode(in); err != nil {
+        return nil, err
+    }
+    return buf.Bytes(), nil
+}
+
+// EncodeJSONAndCompress encodes the given input into JSON and compresses the
+// encoded value (using Gzip format BestCompression level, by default). A
+// canary byte is placed at the beginning of the returned bytes for the logic
+// in decompression method to identify compressed input.
+func EncodeJSONAndCompress(in interface{}, config *compressutil.CompressionConfig) ([]byte, error) {
+    if in == nil {
+        return nil, fmt.Errorf("input for encoding is nil")
+    }
+
+    // First JSON encode the given input
+    encodedBytes, err := EncodeJSON(in)
+    if err != nil {
+        return nil, err
+    }
+
+    if config == nil {
+        config = &compressutil.CompressionConfig{
+            Type: compressutil.CompressionTypeGzip,
+            GzipCompressionLevel: gzip.BestCompression,
+        }
+    }
+
+    return compressutil.Compress(encodedBytes, config)
+}
+
+// DecodeJSON tries to decompress the given data. The call to decompress fails
+// if the content was not compressed in the first place, which is identified by
+// a canary byte before the compressed data. If the data is not compressed, it
+// is JSON decoded directly. Otherwise the decompressed data will be JSON
+// decoded.
+func DecodeJSON(data []byte, out interface{}) error {
+    if data == nil || len(data) == 0 {
+        return fmt.Errorf("'data' being decoded is nil")
+    }
+    if out == nil {
+        return fmt.Errorf("output parameter 'out' is nil")
+    }
+
+    // Decompress the data if it was compressed in the first place
+    decompressedBytes, uncompressed, err := compressutil.Decompress(data)
+    if err != nil {
+        return errwrap.Wrapf("failed to decompress JSON: {{err}}", err)
+    }
+    if !uncompressed && (decompressedBytes == nil || len(decompressedBytes) == 0) {
+        return fmt.Errorf("decompressed data being decoded is invalid")
+    }
+
+    // If the input did not contain the compression canary, the compression
+    // utility will have reported it as uncompressed. Decode the decompressed
+    // input.
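+	// (Editorial sketch, not in the upstream source. The typical round trip
+	// pairs EncodeJSONAndCompress with DecodeJSON; errors are elided here:
+	//
+	//	blob, _ := jsonutil.EncodeJSONAndCompress(map[string]string{"k": "v"}, nil)
+	//	var out map[string]interface{}
+	//	_ = jsonutil.DecodeJSON(blob, &out)
+	//
+	// A nil config selects gzip at BestCompression, as documented on
+	// EncodeJSONAndCompress above.)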
+    if !uncompressed {
+        data = decompressedBytes
+    }
+
+    return DecodeJSONFromReader(bytes.NewReader(data), out)
+}
+
+// DecodeJSONFromReader decodes/unmarshals JSON from the given io.Reader into the desired object
+func DecodeJSONFromReader(r io.Reader, out interface{}) error {
+    if r == nil {
+        return fmt.Errorf("'io.Reader' being decoded is nil")
+    }
+    if out == nil {
+        return fmt.Errorf("output parameter 'out' is nil")
+    }
+
+    dec := json.NewDecoder(r)
+
+    // While decoding JSON values, interpret the integer values as `json.Number`s instead of `float64`.
+    dec.UseNumber()
+
+    // Since 'out' is an interface representing a pointer, pass it to the decoder without an '&'
+    return dec.Decode(out)
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/license/feature.go b/vendor/github.com/hashicorp/vault/sdk/helper/license/feature.go
new file mode 100644
index 00000000000..c7c000a58a3
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/helper/license/feature.go
@@ -0,0 +1,10 @@
+package license
+
+// Features is a bitmask of feature flags
+type Features uint
+
+const FeatureNone Features = 0
+
+func (f Features) HasFeature(flag Features) bool {
+    return false
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/locksutil/locks.go b/vendor/github.com/hashicorp/vault/sdk/helper/locksutil/locks.go
new file mode 100644
index 00000000000..35ffcf739d9
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/helper/locksutil/locks.go
@@ -0,0 +1,58 @@
+package locksutil
+
+import (
+    "sync"
+
+    "github.com/hashicorp/vault/sdk/helper/cryptoutil"
+)
+
+const (
+    LockCount = 256
+)
+
+type LockEntry struct {
+    sync.RWMutex
+}
+
+// CreateLocks returns an array so that the locks can be iterated over in
+// order.
+//
+// This is only threadsafe if a process is using a single lock, or iterating
+// over the entire lock slice in order.
Using a consistent order avoids +// deadlocks because you can never have the following: +// +// Lock A, Lock B +// Lock B, Lock A +// +// Where process 1 is now deadlocked trying to lock B, and process 2 deadlocked trying to lock A +func CreateLocks() []*LockEntry { + ret := make([]*LockEntry, LockCount) + for i := range ret { + ret[i] = new(LockEntry) + } + return ret +} + +func LockIndexForKey(key string) uint8 { + return uint8(cryptoutil.Blake2b256Hash(key)[0]) +} + +func LockForKey(locks []*LockEntry, key string) *LockEntry { + return locks[LockIndexForKey(key)] +} + +func LocksForKeys(locks []*LockEntry, keys []string) []*LockEntry { + lockIndexes := make(map[uint8]struct{}, len(keys)) + for _, k := range keys { + lockIndexes[LockIndexForKey(k)] = struct{}{} + } + + locksToReturn := make([]*LockEntry, 0, len(keys)) + for i, l := range locks { + if _, ok := lockIndexes[uint8(i)]; ok { + locksToReturn = append(locksToReturn, l) + } + } + + return locksToReturn +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/logging/logging.go b/vendor/github.com/hashicorp/vault/sdk/helper/logging/logging.go new file mode 100644 index 00000000000..211a545e336 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/logging/logging.go @@ -0,0 +1,81 @@ +package logging + +import ( + "fmt" + "io" + "os" + "strings" + + log "github.com/hashicorp/go-hclog" +) + +type LogFormat int + +const ( + UnspecifiedFormat LogFormat = iota + StandardFormat + JSONFormat +) + +// Stringer implementation +func (l LogFormat) String() string { + switch l { + case UnspecifiedFormat: + return "unspecified" + case StandardFormat: + return "standard" + case JSONFormat: + return "json" + } + + // unreachable + return "unknown" +} + +// NewVaultLogger creates a new logger with the specified level and a Vault +// formatter +func NewVaultLogger(level log.Level) log.Logger { + return NewVaultLoggerWithWriter(log.DefaultOutput, level) +} + +// NewVaultLoggerWithWriter creates a new logger with the specified level and +// writer and a Vault formatter +func NewVaultLoggerWithWriter(w io.Writer, level log.Level) log.Logger { + opts := &log.LoggerOptions{ + Level: level, + IndependentLevels: true, + Output: w, + JSONFormat: ParseEnvLogFormat() == JSONFormat, + } + return log.New(opts) +} + +// ParseLogFormat parses the log format from the provided string. +func ParseLogFormat(format string) (LogFormat, error) { + switch strings.ToLower(strings.TrimSpace(format)) { + case "": + return UnspecifiedFormat, nil + case "standard": + return StandardFormat, nil + case "json": + return JSONFormat, nil + default: + return UnspecifiedFormat, fmt.Errorf("Unknown log format: %s", format) + } +} + +// ParseEnvLogFormat parses the log format from an environment variable. 
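A short usage sketch of the logging helpers above and the parser below (editorial, not part of the patch; it assumes the level constants re-exported by github.com/hashicorp/go-hclog):

    package main

    import (
        "os"

        log "github.com/hashicorp/go-hclog"
        "github.com/hashicorp/vault/sdk/helper/logging"
    )

    func main() {
        // NewVaultLoggerWithWriter consults ParseEnvLogFormat, so the
        // VAULT_LOG_FORMAT environment variable decides JSON vs. standard output.
        os.Setenv("VAULT_LOG_FORMAT", "json")
        logger := logging.NewVaultLoggerWithWriter(os.Stderr, log.Debug)
        logger.Info("plugin catalog refreshed", "count", 3)
    }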
+func ParseEnvLogFormat() LogFormat { + logFormat := os.Getenv("VAULT_LOG_FORMAT") + if logFormat == "" { + logFormat = os.Getenv("LOGXI_FORMAT") + } + switch strings.ToLower(logFormat) { + case "json", "vault_json", "vault-json", "vaultjson": + return JSONFormat + case "standard": + return StandardFormat + default: + return UnspecifiedFormat + } +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pathmanager/pathmanager.go b/vendor/github.com/hashicorp/vault/sdk/helper/pathmanager/pathmanager.go new file mode 100644 index 00000000000..e0e39445b2a --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/pathmanager/pathmanager.go @@ -0,0 +1,136 @@ +package pathmanager + +import ( + "strings" + "sync" + + iradix "github.com/hashicorp/go-immutable-radix" +) + +// PathManager is a prefix searchable index of paths +type PathManager struct { + l sync.RWMutex + paths *iradix.Tree +} + +// New creates a new path manager +func New() *PathManager { + return &PathManager{ + paths: iradix.New(), + } +} + +// AddPaths adds path to the paths list +func (p *PathManager) AddPaths(paths []string) { + p.l.Lock() + defer p.l.Unlock() + + txn := p.paths.Txn() + for _, prefix := range paths { + if len(prefix) == 0 { + continue + } + + var exception bool + if strings.HasPrefix(prefix, "!") { + prefix = strings.TrimPrefix(prefix, "!") + exception = true + } + + // We trim any trailing *, but we don't touch whether it is a trailing + // slash or not since we want to be able to ignore prefixes that fully + // specify a file + txn.Insert([]byte(strings.TrimSuffix(prefix, "*")), exception) + } + p.paths = txn.Commit() +} + +// RemovePaths removes paths from the paths list +func (p *PathManager) RemovePaths(paths []string) { + p.l.Lock() + defer p.l.Unlock() + + txn := p.paths.Txn() + for _, prefix := range paths { + if len(prefix) == 0 { + continue + } + + // Exceptions aren't stored with the leading ! 
so strip it + if strings.HasPrefix(prefix, "!") { + prefix = strings.TrimPrefix(prefix, "!") + } + + // We trim any trailing *, but we don't touch whether it is a trailing + // slash or not since we want to be able to ignore prefixes that fully + // specify a file + txn.Delete([]byte(strings.TrimSuffix(prefix, "*"))) + } + p.paths = txn.Commit() +} + +// RemovePathPrefix removes all paths with the given prefix +func (p *PathManager) RemovePathPrefix(prefix string) { + p.l.Lock() + defer p.l.Unlock() + + // We trim any trailing *, but we don't touch whether it is a trailing + // slash or not since we want to be able to ignore prefixes that fully + // specify a file + p.paths, _ = p.paths.DeletePrefix([]byte(strings.TrimSuffix(prefix, "*"))) +} + +// Len returns the number of paths +func (p *PathManager) Len() int { + return p.paths.Len() +} + +// Paths returns the path list +func (p *PathManager) Paths() []string { + p.l.RLock() + defer p.l.RUnlock() + + paths := make([]string, 0, p.paths.Len()) + walkFn := func(k []byte, v interface{}) bool { + paths = append(paths, string(k)) + return false + } + p.paths.Root().Walk(walkFn) + return paths +} + +// HasPath returns if the prefix for the path exists regardless if it is a path +// (ending with /) or a prefix for a leaf node +func (p *PathManager) HasPath(path string) bool { + p.l.RLock() + defer p.l.RUnlock() + + if _, exceptionRaw, ok := p.paths.Root().LongestPrefix([]byte(path)); ok { + var exception bool + if exceptionRaw != nil { + exception = exceptionRaw.(bool) + } + return !exception + } + return false +} + +// HasExactPath returns if the longest match is an exact match for the +// full path +func (p *PathManager) HasExactPath(path string) bool { + p.l.RLock() + defer p.l.RUnlock() + + if val, exceptionRaw, ok := p.paths.Root().LongestPrefix([]byte(path)); ok { + var exception bool + if exceptionRaw != nil { + exception = exceptionRaw.(bool) + } + + strVal := string(val) + if strings.HasSuffix(strVal, "/") || strVal == path { + return !exception + } + } + return false +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/env.go b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/env.go new file mode 100644 index 00000000000..df1fdbeede9 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/env.go @@ -0,0 +1,77 @@ +package pluginutil + +import ( + "os" + + "github.com/hashicorp/go-secure-stdlib/mlock" + version "github.com/hashicorp/go-version" +) + +const ( + // PluginAutoMTLSEnv is used to ensure AutoMTLS is used. This will override + // setting a TLSProviderFunc for a plugin. + PluginAutoMTLSEnv = "VAULT_PLUGIN_AUTOMTLS_ENABLED" + + // PluginMlockEnabled is the ENV name used to pass the configuration for + // enabling mlock + PluginMlockEnabled = "VAULT_PLUGIN_MLOCK_ENABLED" + + // PluginVaultVersionEnv is the ENV name used to pass the version of the + // vault server to the plugin + PluginVaultVersionEnv = "VAULT_VERSION" + + // PluginMetadataModeEnv is an ENV name used to disable TLS communication + // to bootstrap mounting plugins. + PluginMetadataModeEnv = "VAULT_PLUGIN_METADATA_MODE" + + // PluginUnwrapTokenEnv is the ENV name used to pass unwrap tokens to the + // plugin. + PluginUnwrapTokenEnv = "VAULT_UNWRAP_TOKEN" + + // PluginCACertPEMEnv is an ENV name used for holding a CA PEM-encoded + // string. Used for testing. 
+ PluginCACertPEMEnv = "VAULT_TESTING_PLUGIN_CA_PEM" + + // PluginMultiplexingOptOut is an ENV name used to define a comma separated list of plugin names + // opted-out of the multiplexing feature; for emergencies if multiplexing ever causes issues + PluginMultiplexingOptOut = "VAULT_PLUGIN_MULTIPLEXING_OPT_OUT" +) + +// OptionallyEnableMlock determines if mlock should be called, and if so enables +// mlock. +func OptionallyEnableMlock() error { + if os.Getenv(PluginMlockEnabled) == "true" { + return mlock.LockMemory() + } + + return nil +} + +// GRPCSupport defaults to returning true, unless VAULT_VERSION is missing or +// it fails to meet the version constraint. +func GRPCSupport() bool { + verString := os.Getenv(PluginVaultVersionEnv) + // If the env var is empty, we fall back to netrpc for backward compatibility. + if verString == "" { + return false + } + if verString != "unknown" { + ver, err := version.NewVersion(verString) + if err != nil { + return true + } + // Due to some regressions on 0.9.2 & 0.9.3 we now require version 0.9.4 + // to allow the plugin framework to default to gRPC. + constraint, err := version.NewConstraint(">= 0.9.4") + if err != nil { + return true + } + return constraint.Check(ver) + } + return true +} + +// InMetadataMode returns true if the plugin calling this function is running in metadata mode. +func InMetadataMode() bool { + return os.Getenv(PluginMetadataModeEnv) == "true" +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.go b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.go new file mode 100644 index 00000000000..41316ec49df --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.go @@ -0,0 +1,80 @@ +package pluginutil + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + + "github.com/hashicorp/go-secure-stdlib/strutil" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +var ErrNoMultiplexingIDFound = errors.New("no multiplexing ID found") + +type PluginMultiplexingServerImpl struct { + UnimplementedPluginMultiplexingServer + + Supported bool +} + +func (pm PluginMultiplexingServerImpl) MultiplexingSupport(_ context.Context, _ *MultiplexingSupportRequest) (*MultiplexingSupportResponse, error) { + return &MultiplexingSupportResponse{ + Supported: pm.Supported, + }, nil +} + +func MultiplexingSupported(ctx context.Context, cc grpc.ClientConnInterface, name string) (bool, error) { + if cc == nil { + return false, fmt.Errorf("client connection is nil") + } + + out := strings.Split(os.Getenv(PluginMultiplexingOptOut), ",") + if strutil.StrListContains(out, name) { + return false, nil + } + + req := new(MultiplexingSupportRequest) + resp, err := NewPluginMultiplexingClient(cc).MultiplexingSupport(ctx, req) + if err != nil { + + // If the server does not implement the multiplexing server then we can + // assume it is not multiplexed + if status.Code(err) == codes.Unimplemented { + return false, nil + } + + return false, err + } + if resp == nil { + // Somehow got a nil response, assume not multiplexed + return false, nil + } + + return resp.Supported, nil +} + +func GetMultiplexIDFromContext(ctx context.Context) (string, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return "", fmt.Errorf("missing plugin multiplexing metadata") + } + + multiplexIDs := md[MultiplexingCtxKey] + if len(multiplexIDs) == 0 { + return "", 
ErrNoMultiplexingIDFound + } else if len(multiplexIDs) != 1 { + return "", fmt.Errorf("unexpected number of IDs in metadata: (%d)", len(multiplexIDs)) + } + + multiplexID := multiplexIDs[0] + if multiplexID == "" { + return "", fmt.Errorf("empty multiplex ID in metadata") + } + + return multiplexID, nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.pb.go b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.pb.go new file mode 100644 index 00000000000..96963af3e62 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.pb.go @@ -0,0 +1,213 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.7 +// source: sdk/helper/pluginutil/multiplexing.proto + +package pluginutil + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type MultiplexingSupportRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *MultiplexingSupportRequest) Reset() { + *x = MultiplexingSupportRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MultiplexingSupportRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MultiplexingSupportRequest) ProtoMessage() {} + +func (x *MultiplexingSupportRequest) ProtoReflect() protoreflect.Message { + mi := &file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MultiplexingSupportRequest.ProtoReflect.Descriptor instead. 
+func (*MultiplexingSupportRequest) Descriptor() ([]byte, []int) { + return file_sdk_helper_pluginutil_multiplexing_proto_rawDescGZIP(), []int{0} +} + +type MultiplexingSupportResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Supported bool `protobuf:"varint,1,opt,name=supported,proto3" json:"supported,omitempty"` +} + +func (x *MultiplexingSupportResponse) Reset() { + *x = MultiplexingSupportResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MultiplexingSupportResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MultiplexingSupportResponse) ProtoMessage() {} + +func (x *MultiplexingSupportResponse) ProtoReflect() protoreflect.Message { + mi := &file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MultiplexingSupportResponse.ProtoReflect.Descriptor instead. +func (*MultiplexingSupportResponse) Descriptor() ([]byte, []int) { + return file_sdk_helper_pluginutil_multiplexing_proto_rawDescGZIP(), []int{1} +} + +func (x *MultiplexingSupportResponse) GetSupported() bool { + if x != nil { + return x.Supported + } + return false +} + +var File_sdk_helper_pluginutil_multiplexing_proto protoreflect.FileDescriptor + +var file_sdk_helper_pluginutil_multiplexing_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x73, 0x64, 0x6b, 0x2f, 0x68, 0x65, 0x6c, 0x70, 0x65, 0x72, 0x2f, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x75, 0x74, 0x69, 0x6c, 0x2f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, + 0x78, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, + 0x69, 0x6e, 0x67, 0x22, 0x1c, 0x0a, 0x1a, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, + 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x3b, 0x0a, 0x1b, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, 0x69, 0x6e, + 0x67, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x32, 0x97, + 0x01, 0x0a, 0x12, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, + 0x65, 0x78, 0x69, 0x6e, 0x67, 0x12, 0x80, 0x01, 0x0a, 0x13, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, + 0x6c, 0x65, 0x78, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x33, 0x2e, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x6d, 0x75, 0x6c, 0x74, 0x69, + 0x70, 0x6c, 0x65, 0x78, 0x69, 0x6e, 0x67, 0x2e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, + 0x78, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x75, 0x74, 0x69, 0x6c, 0x2e, + 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, 0x69, 0x6e, 0x67, 0x2e, 0x4d, 0x75, 0x6c, + 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, + 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, + 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x68, 0x65, 0x6c, 0x70, 0x65, + 0x72, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x75, 0x74, 0x69, 0x6c, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sdk_helper_pluginutil_multiplexing_proto_rawDescOnce sync.Once + file_sdk_helper_pluginutil_multiplexing_proto_rawDescData = file_sdk_helper_pluginutil_multiplexing_proto_rawDesc +) + +func file_sdk_helper_pluginutil_multiplexing_proto_rawDescGZIP() []byte { + file_sdk_helper_pluginutil_multiplexing_proto_rawDescOnce.Do(func() { + file_sdk_helper_pluginutil_multiplexing_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_helper_pluginutil_multiplexing_proto_rawDescData) + }) + return file_sdk_helper_pluginutil_multiplexing_proto_rawDescData +} + +var file_sdk_helper_pluginutil_multiplexing_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_sdk_helper_pluginutil_multiplexing_proto_goTypes = []interface{}{ + (*MultiplexingSupportRequest)(nil), // 0: pluginutil.multiplexing.MultiplexingSupportRequest + (*MultiplexingSupportResponse)(nil), // 1: pluginutil.multiplexing.MultiplexingSupportResponse +} +var file_sdk_helper_pluginutil_multiplexing_proto_depIdxs = []int32{ + 0, // 0: pluginutil.multiplexing.PluginMultiplexing.MultiplexingSupport:input_type -> pluginutil.multiplexing.MultiplexingSupportRequest + 1, // 1: pluginutil.multiplexing.PluginMultiplexing.MultiplexingSupport:output_type -> pluginutil.multiplexing.MultiplexingSupportResponse + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_sdk_helper_pluginutil_multiplexing_proto_init() } +func file_sdk_helper_pluginutil_multiplexing_proto_init() { + if File_sdk_helper_pluginutil_multiplexing_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MultiplexingSupportRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MultiplexingSupportResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sdk_helper_pluginutil_multiplexing_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_sdk_helper_pluginutil_multiplexing_proto_goTypes, + DependencyIndexes: file_sdk_helper_pluginutil_multiplexing_proto_depIdxs, + MessageInfos: file_sdk_helper_pluginutil_multiplexing_proto_msgTypes, + }.Build() + File_sdk_helper_pluginutil_multiplexing_proto = out.File + file_sdk_helper_pluginutil_multiplexing_proto_rawDesc = nil + file_sdk_helper_pluginutil_multiplexing_proto_goTypes = nil + file_sdk_helper_pluginutil_multiplexing_proto_depIdxs = nil +} diff 
--git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.proto b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.proto new file mode 100644 index 00000000000..aa2438b070f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; +package pluginutil.multiplexing; + +option go_package = "github.com/hashicorp/vault/sdk/helper/pluginutil"; + +message MultiplexingSupportRequest {} +message MultiplexingSupportResponse { + bool supported = 1; +} + +service PluginMultiplexing { + rpc MultiplexingSupport(MultiplexingSupportRequest) returns (MultiplexingSupportResponse); +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing_grpc.pb.go b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing_grpc.pb.go new file mode 100644 index 00000000000..aa8d0e47ba8 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing_grpc.pb.go @@ -0,0 +1,101 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package pluginutil + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// PluginMultiplexingClient is the client API for PluginMultiplexing service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type PluginMultiplexingClient interface { + MultiplexingSupport(ctx context.Context, in *MultiplexingSupportRequest, opts ...grpc.CallOption) (*MultiplexingSupportResponse, error) +} + +type pluginMultiplexingClient struct { + cc grpc.ClientConnInterface +} + +func NewPluginMultiplexingClient(cc grpc.ClientConnInterface) PluginMultiplexingClient { + return &pluginMultiplexingClient{cc} +} + +func (c *pluginMultiplexingClient) MultiplexingSupport(ctx context.Context, in *MultiplexingSupportRequest, opts ...grpc.CallOption) (*MultiplexingSupportResponse, error) { + out := new(MultiplexingSupportResponse) + err := c.cc.Invoke(ctx, "/pluginutil.multiplexing.PluginMultiplexing/MultiplexingSupport", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// PluginMultiplexingServer is the server API for PluginMultiplexing service. +// All implementations must embed UnimplementedPluginMultiplexingServer +// for forward compatibility +type PluginMultiplexingServer interface { + MultiplexingSupport(context.Context, *MultiplexingSupportRequest) (*MultiplexingSupportResponse, error) + mustEmbedUnimplementedPluginMultiplexingServer() +} + +// UnimplementedPluginMultiplexingServer must be embedded to have forward compatible implementations. 
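For orientation, a hedged sketch of the client-side probe built on this service (editorial, not part of the patch; the connection would normally come from go-plugin's dispensed client, and the plugin name here is hypothetical):

    package probe

    import (
        "context"
        "fmt"

        "google.golang.org/grpc"

        "github.com/hashicorp/vault/sdk/helper/pluginutil"
    )

    // CheckPlugin asks a running plugin whether it supports multiplexing;
    // MultiplexingSupported treats an Unimplemented response from older
    // plugins as "not supported" rather than an error.
    func CheckPlugin(ctx context.Context, cc grpc.ClientConnInterface) (bool, error) {
        supported, err := pluginutil.MultiplexingSupported(ctx, cc, "example-secrets-plugin")
        if err != nil {
            return false, err
        }
        fmt.Println("multiplexing supported:", supported)
        return supported, nil
    }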
+type UnimplementedPluginMultiplexingServer struct { +} + +func (UnimplementedPluginMultiplexingServer) MultiplexingSupport(context.Context, *MultiplexingSupportRequest) (*MultiplexingSupportResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MultiplexingSupport not implemented") +} +func (UnimplementedPluginMultiplexingServer) mustEmbedUnimplementedPluginMultiplexingServer() {} + +// UnsafePluginMultiplexingServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to PluginMultiplexingServer will +// result in compilation errors. +type UnsafePluginMultiplexingServer interface { + mustEmbedUnimplementedPluginMultiplexingServer() +} + +func RegisterPluginMultiplexingServer(s grpc.ServiceRegistrar, srv PluginMultiplexingServer) { + s.RegisterService(&PluginMultiplexing_ServiceDesc, srv) +} + +func _PluginMultiplexing_MultiplexingSupport_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MultiplexingSupportRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PluginMultiplexingServer).MultiplexingSupport(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pluginutil.multiplexing.PluginMultiplexing/MultiplexingSupport", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PluginMultiplexingServer).MultiplexingSupport(ctx, req.(*MultiplexingSupportRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// PluginMultiplexing_ServiceDesc is the grpc.ServiceDesc for PluginMultiplexing service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var PluginMultiplexing_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "pluginutil.multiplexing.PluginMultiplexing", + HandlerType: (*PluginMultiplexingServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "MultiplexingSupport", + Handler: _PluginMultiplexing_MultiplexingSupport_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "sdk/helper/pluginutil/multiplexing.proto", +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/run_config.go b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/run_config.go new file mode 100644 index 00000000000..3eb8fb2b281 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/run_config.go @@ -0,0 +1,179 @@ +package pluginutil + +import ( + "context" + "crypto/sha256" + "crypto/tls" + "fmt" + "os/exec" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/version" +) + +type PluginClientConfig struct { + Name string + PluginType consts.PluginType + Version string + PluginSets map[int]plugin.PluginSet + HandshakeConfig plugin.HandshakeConfig + Logger log.Logger + IsMetadataMode bool + AutoMTLS bool + MLock bool + Wrapper RunnerUtil +} + +type runConfig struct { + // Provided by PluginRunner + command string + args []string + sha256 []byte + + // Initialized with what's in PluginRunner.Env, but can be added to + env []string + + PluginClientConfig +} + +func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error) { + cmd := exec.Command(rc.command, rc.args...) 
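+	// (Editorial note, not in the upstream source: exec.Command leaves
+	// cmd.Env nil, and os/exec treats a nil Env as "inherit the parent's
+	// environment". As soon as anything is appended below, Env becomes
+	// non-nil, so the plugin process sees only the variables assembled
+	// here rather than the parent environment plus these additions.)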
+ cmd.Env = append(cmd.Env, rc.env...) + + // Add the mlock setting to the ENV of the plugin + if rc.MLock || (rc.Wrapper != nil && rc.Wrapper.MlockEnabled()) { + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMlockEnabled, "true")) + } + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version.GetVersion().Version)) + + if rc.IsMetadataMode { + rc.Logger = rc.Logger.With("metadata", "true") + } + metadataEnv := fmt.Sprintf("%s=%t", PluginMetadataModeEnv, rc.IsMetadataMode) + cmd.Env = append(cmd.Env, metadataEnv) + + automtlsEnv := fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, rc.AutoMTLS) + cmd.Env = append(cmd.Env, automtlsEnv) + + var clientTLSConfig *tls.Config + if !rc.AutoMTLS && !rc.IsMetadataMode { + // Get a CA TLS Certificate + certBytes, key, err := generateCert() + if err != nil { + return nil, err + } + + // Use CA to sign a client cert and return a configured TLS config + clientTLSConfig, err = createClientTLSConfig(certBytes, key) + if err != nil { + return nil, err + } + + // Use CA to sign a server cert and wrap the values in a response wrapped + // token. + wrapToken, err := wrapServerConfig(ctx, rc.Wrapper, certBytes, key) + if err != nil { + return nil, err + } + + // Add the response wrap token to the ENV of the plugin + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginUnwrapTokenEnv, wrapToken)) + } + + secureConfig := &plugin.SecureConfig{ + Checksum: rc.sha256, + Hash: sha256.New(), + } + + clientConfig := &plugin.ClientConfig{ + HandshakeConfig: rc.HandshakeConfig, + VersionedPlugins: rc.PluginSets, + Cmd: cmd, + SecureConfig: secureConfig, + TLSConfig: clientTLSConfig, + Logger: rc.Logger, + AllowedProtocols: []plugin.Protocol{ + plugin.ProtocolNetRPC, + plugin.ProtocolGRPC, + }, + AutoMTLS: rc.AutoMTLS, + } + return clientConfig, nil +} + +func (rc runConfig) run(ctx context.Context) (*plugin.Client, error) { + clientConfig, err := rc.makeConfig(ctx) + if err != nil { + return nil, err + } + + client := plugin.NewClient(clientConfig) + return client, nil +} + +type RunOpt func(*runConfig) + +func Env(env ...string) RunOpt { + return func(rc *runConfig) { + rc.env = append(rc.env, env...) 
+ } +} + +func Runner(wrapper RunnerUtil) RunOpt { + return func(rc *runConfig) { + rc.Wrapper = wrapper + } +} + +func PluginSets(pluginSets map[int]plugin.PluginSet) RunOpt { + return func(rc *runConfig) { + rc.PluginSets = pluginSets + } +} + +func HandshakeConfig(hs plugin.HandshakeConfig) RunOpt { + return func(rc *runConfig) { + rc.HandshakeConfig = hs + } +} + +func Logger(logger log.Logger) RunOpt { + return func(rc *runConfig) { + rc.Logger = logger + } +} + +func MetadataMode(isMetadataMode bool) RunOpt { + return func(rc *runConfig) { + rc.IsMetadataMode = isMetadataMode + } +} + +func AutoMTLS(autoMTLS bool) RunOpt { + return func(rc *runConfig) { + rc.AutoMTLS = autoMTLS + } +} + +func MLock(mlock bool) RunOpt { + return func(rc *runConfig) { + rc.MLock = mlock + } +} + +func (r *PluginRunner) RunConfig(ctx context.Context, opts ...RunOpt) (*plugin.Client, error) { + rc := runConfig{ + command: r.Command, + args: r.Args, + sha256: r.Sha256, + env: r.Env, + } + + for _, opt := range opts { + opt(&rc) + } + + return rc.run(ctx) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/runner.go b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/runner.go new file mode 100644 index 00000000000..631c4f3a2f3 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/runner.go @@ -0,0 +1,115 @@ +package pluginutil + +import ( + "context" + "time" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/go-version" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/wrapping" + "google.golang.org/grpc" +) + +// Looker defines the plugin Lookup function that looks into the plugin catalog +// for available plugins and returns a PluginRunner +type Looker interface { + LookupPlugin(ctx context.Context, pluginName string, pluginType consts.PluginType) (*PluginRunner, error) + LookupPluginVersion(ctx context.Context, pluginName string, pluginType consts.PluginType, version string) (*PluginRunner, error) +} + +// RunnerUtil interface defines the functions needed by the runner to wrap the +// metadata needed to run a plugin process. This includes looking up Mlock +// configuration and wrapping data in a response wrapped token. +// logical.SystemView implementations satisfy this interface. +type RunnerUtil interface { + NewPluginClient(ctx context.Context, config PluginClientConfig) (PluginClient, error) + ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) + MlockEnabled() bool +} + +// LookRunnerUtil defines the functions for both Looker and Wrapper +type LookRunnerUtil interface { + Looker + RunnerUtil +} + +type PluginClient interface { + Conn() grpc.ClientConnInterface + Reload() error + plugin.ClientProtocol +} + +const MultiplexingCtxKey string = "multiplex_id" + +// PluginRunner defines the metadata needed to run a plugin securely with +// go-plugin. 
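A hedged sketch of driving the functional options above through RunConfig (editorial, not part of the patch; runner, sys, and pluginSets stand in for values that Vault's plugin catalog and logical.SystemView implementation would provide, and the handshake config is a placeholder):

    package launcher

    import (
        "context"

        log "github.com/hashicorp/go-hclog"
        plugin "github.com/hashicorp/go-plugin"

        "github.com/hashicorp/vault/sdk/helper/pluginutil"
    )

    func launch(ctx context.Context, runner *pluginutil.PluginRunner, sys pluginutil.RunnerUtil,
        pluginSets map[int]plugin.PluginSet) (*plugin.Client, error) {
        // Each option mutates the internal runConfig before run() builds the
        // final plugin.ClientConfig.
        return runner.RunConfig(ctx,
            pluginutil.Runner(sys),
            pluginutil.PluginSets(pluginSets),
            pluginutil.HandshakeConfig(plugin.HandshakeConfig{ProtocolVersion: 1}),
            pluginutil.Logger(log.Default()),
            pluginutil.AutoMTLS(true),
        )
    }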
+type PluginRunner struct { + Name string `json:"name" structs:"name"` + Type consts.PluginType `json:"type" structs:"type"` + Version string `json:"version" structs:"version"` + Command string `json:"command" structs:"command"` + Args []string `json:"args" structs:"args"` + Env []string `json:"env" structs:"env"` + Sha256 []byte `json:"sha256" structs:"sha256"` + Builtin bool `json:"builtin" structs:"builtin"` + BuiltinFactory func() (interface{}, error) `json:"-" structs:"-"` +} + +// Run takes a wrapper RunnerUtil instance along with the go-plugin parameters and +// returns a configured plugin.Client with TLS Configured and a wrapping token set +// on PluginUnwrapTokenEnv for plugin process consumption. +func (r *PluginRunner) Run(ctx context.Context, wrapper RunnerUtil, pluginSets map[int]plugin.PluginSet, hs plugin.HandshakeConfig, env []string, logger log.Logger) (*plugin.Client, error) { + return r.RunConfig(ctx, + Runner(wrapper), + PluginSets(pluginSets), + HandshakeConfig(hs), + Env(env...), + Logger(logger), + MetadataMode(false), + ) +} + +// RunMetadataMode returns a configured plugin.Client that will dispense a plugin +// in metadata mode. The PluginMetadataModeEnv is passed in as part of the Cmd to +// plugin.Client, and consumed by the plugin process on api.VaultPluginTLSProvider. +func (r *PluginRunner) RunMetadataMode(ctx context.Context, wrapper RunnerUtil, pluginSets map[int]plugin.PluginSet, hs plugin.HandshakeConfig, env []string, logger log.Logger) (*plugin.Client, error) { + return r.RunConfig(ctx, + Runner(wrapper), + PluginSets(pluginSets), + HandshakeConfig(hs), + Env(env...), + Logger(logger), + MetadataMode(true), + ) +} + +// VersionedPlugin holds any versioning information stored about a plugin in the +// plugin catalog. +type VersionedPlugin struct { + Type string `json:"type"` // string instead of consts.PluginType so that we get the string form in API responses. + Name string `json:"name"` + Version string `json:"version"` + SHA256 string `json:"sha256,omitempty"` + Builtin bool `json:"builtin"` + DeprecationStatus string `json:"deprecation_status,omitempty"` + + // Pre-parsed semver struct of the Version field + SemanticVersion *version.Version `json:"-"` +} + +// CtxCancelIfCanceled takes a context cancel func and a context. If the context is +// shutdown the cancelfunc is called. This is useful for merging two cancel +// functions. +func CtxCancelIfCanceled(f context.CancelFunc, ctxCanceler context.Context) chan struct{} { + quitCh := make(chan struct{}) + go func() { + select { + case <-quitCh: + case <-ctxCanceler.Done(): + f() + } + }() + return quitCh +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/tls.go b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/tls.go new file mode 100644 index 00000000000..c5fff6d701e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/tls.go @@ -0,0 +1,106 @@ +package pluginutil + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/helper/certutil" +) + +// generateCert is used internally to create certificates for the plugin +// client and server. 
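+// (Editorial note, not in the upstream source: the certificate generated
+// below is self-signed, marked IsCA, valid for both client and server
+// authentication, and expires after 262980 hours, i.e. roughly 30 years.)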
+func generateCert() ([]byte, *ecdsa.PrivateKey, error) { + key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + return nil, nil, err + } + + host, err := uuid.GenerateUUID() + if err != nil { + return nil, nil, err + } + + sn, err := certutil.GenerateSerialNumber() + if err != nil { + return nil, nil, err + } + + template := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + DNSNames: []string{host}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, + SerialNumber: sn, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + IsCA: true, + } + + certBytes, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) + if err != nil { + return nil, nil, errwrap.Wrapf("unable to generate client certificate: {{err}}", err) + } + + return certBytes, key, nil +} + +// createClientTLSConfig creates a signed certificate and returns a configured +// TLS config. +func createClientTLSConfig(certBytes []byte, key *ecdsa.PrivateKey) (*tls.Config, error) { + clientCert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, errwrap.Wrapf("error parsing generated plugin certificate: {{err}}", err) + } + + cert := tls.Certificate{ + Certificate: [][]byte{certBytes}, + PrivateKey: key, + Leaf: clientCert, + } + + clientCertPool := x509.NewCertPool() + clientCertPool.AddCert(clientCert) + + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: clientCertPool, + ClientCAs: clientCertPool, + ClientAuth: tls.RequireAndVerifyClientCert, + ServerName: clientCert.Subject.CommonName, + MinVersion: tls.VersionTLS12, + } + + return tlsConfig, nil +} + +// wrapServerConfig is used to create a server certificate and private key, then +// wrap them in an unwrap token for later retrieval by the plugin. 
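Since both helpers above are unexported, the following in-package sketch simply pairs them; it is illustrative, not part of the patch. The point it shows: the one self-signed certificate produced by generateCert acts as the leaf, RootCAs, and ClientCAs at once, so the plugin client and server mutually authenticate against the same ephemeral key pair.

// In-package sketch of the mTLS bootstrap performed by these helpers.
func exampleClientTLS() (*tls.Config, error) {
	certBytes, key, err := generateCert() // CN and SAN are a random UUID
	if err != nil {
		return nil, err
	}
	return createClientTLSConfig(certBytes, key)
}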
+func wrapServerConfig(ctx context.Context, sys RunnerUtil, certBytes []byte, key *ecdsa.PrivateKey) (string, error) { + rawKey, err := x509.MarshalECPrivateKey(key) + if err != nil { + return "", err + } + + wrapInfo, err := sys.ResponseWrapData(ctx, map[string]interface{}{ + "ServerCert": certBytes, + "ServerKey": rawKey, + }, time.Second*60, true) + if err != nil { + return "", err + } + + return wrapInfo.Token, nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/strutil/strutil.go b/vendor/github.com/hashicorp/vault/sdk/helper/strutil/strutil.go new file mode 100644 index 00000000000..09cc9425cb1 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/strutil/strutil.go @@ -0,0 +1,94 @@ +// DEPRECATED: this has been moved to go-secure-stdlib and will be removed +package strutil + +import ( + extstrutil "github.com/hashicorp/go-secure-stdlib/strutil" +) + +func StrListContainsGlob(haystack []string, needle string) bool { + return extstrutil.StrListContainsGlob(haystack, needle) +} + +func StrListContains(haystack []string, needle string) bool { + return extstrutil.StrListContains(haystack, needle) +} + +func StrListContainsCaseInsensitive(haystack []string, needle string) bool { + return extstrutil.StrListContainsCaseInsensitive(haystack, needle) +} + +func StrListSubset(super, sub []string) bool { + return extstrutil.StrListSubset(super, sub) +} + +func ParseDedupAndSortStrings(input string, sep string) []string { + return extstrutil.ParseDedupAndSortStrings(input, sep) +} + +func ParseDedupLowercaseAndSortStrings(input string, sep string) []string { + return extstrutil.ParseDedupLowercaseAndSortStrings(input, sep) +} + +func ParseKeyValues(input string, out map[string]string, sep string) error { + return extstrutil.ParseKeyValues(input, out, sep) +} + +func ParseArbitraryKeyValues(input string, out map[string]string, sep string) error { + return extstrutil.ParseArbitraryKeyValues(input, out, sep) +} + +func ParseStringSlice(input string, sep string) []string { + return extstrutil.ParseStringSlice(input, sep) +} + +func ParseArbitraryStringSlice(input string, sep string) []string { + return extstrutil.ParseArbitraryStringSlice(input, sep) +} + +func TrimStrings(items []string) []string { + return extstrutil.TrimStrings(items) +} + +func RemoveDuplicates(items []string, lowercase bool) []string { + return extstrutil.RemoveDuplicates(items, lowercase) +} + +func RemoveDuplicatesStable(items []string, caseInsensitive bool) []string { + return extstrutil.RemoveDuplicatesStable(items, caseInsensitive) +} + +func RemoveEmpty(items []string) []string { + return extstrutil.RemoveEmpty(items) +} + +func EquivalentSlices(a, b []string) bool { + return extstrutil.EquivalentSlices(a, b) +} + +func EqualStringMaps(a, b map[string]string) bool { + return extstrutil.EqualStringMaps(a, b) +} + +func StrListDelete(s []string, d string) []string { + return extstrutil.StrListDelete(s, d) +} + +func GlobbedStringsMatch(item, val string) bool { + return extstrutil.GlobbedStringsMatch(item, val) +} + +func AppendIfMissing(slice []string, i string) []string { + return extstrutil.AppendIfMissing(slice, i) +} + +func MergeSlices(args ...[]string) []string { + return extstrutil.MergeSlices(args...) 
+} + +func Difference(a, b []string, lowercase bool) []string { + return extstrutil.Difference(a, b, lowercase) +} + +func GetString(m map[string]interface{}, key string) (string, error) { + return extstrutil.GetString(m, key) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/wrapping/wrapinfo.go b/vendor/github.com/hashicorp/vault/sdk/helper/wrapping/wrapinfo.go new file mode 100644 index 00000000000..8d8e63340f9 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/wrapping/wrapinfo.go @@ -0,0 +1,37 @@ +package wrapping + +import "time" + +type ResponseWrapInfo struct { + // Setting to non-zero specifies that the response should be wrapped. + // Specifies the desired TTL of the wrapping token. + TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl" sentinel:""` + + // The token containing the wrapped response + Token string `json:"token" structs:"token" mapstructure:"token" sentinel:""` + + // The token accessor for the wrapped response token + Accessor string `json:"accessor" structs:"accessor" mapstructure:"accessor"` + + // The creation time. This can be used with the TTL to figure out an + // expected expiration. + CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time" sentinel:""` + + // If the contained response is the output of a token or approle secret-id creation call, the + // created token's/secret-id's accessor will be accessible here + WrappedAccessor string `json:"wrapped_accessor" structs:"wrapped_accessor" mapstructure:"wrapped_accessor" sentinel:""` + + // WrappedEntityID is the entity identifier of the caller who initiated the + // wrapping request + WrappedEntityID string `json:"wrapped_entity_id" structs:"wrapped_entity_id" mapstructure:"wrapped_entity_id" sentinel:""` + + // The format to use. This doesn't get returned, it's only internal. + Format string `json:"format" structs:"format" mapstructure:"format" sentinel:""` + + // CreationPath is the original request path that was used to create + // the wrapped response. + CreationPath string `json:"creation_path" structs:"creation_path" mapstructure:"creation_path" sentinel:""` + + // Controls seal wrapping behavior downstream for specific use cases + SealWrap bool `json:"seal_wrap" structs:"seal_wrap" mapstructure:"seal_wrap" sentinel:""` +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/audit.go b/vendor/github.com/hashicorp/vault/sdk/logical/audit.go new file mode 100644 index 00000000000..8ba70f37e01 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/audit.go @@ -0,0 +1,19 @@ +package logical + +type LogInput struct { + Type string + Auth *Auth + Request *Request + Response *Response + OuterErr error + NonHMACReqDataKeys []string + NonHMACRespDataKeys []string +} + +type MarshalOptions struct { + ValueHasher func(string) string +} + +type OptMarshaler interface { + MarshalJSONWithOptions(*MarshalOptions) ([]byte, error) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/auth.go b/vendor/github.com/hashicorp/vault/sdk/logical/auth.go new file mode 100644 index 00000000000..62707e81959 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/auth.go @@ -0,0 +1,129 @@ +package logical + +import ( + "fmt" + "time" + + sockaddr "github.com/hashicorp/go-sockaddr" +) + +// Auth is the resulting authentication information that is part of +// Response for credential backends. 
It's also attached to Request objects and +// defines the authentication used for the request. This value is audit logged. +type Auth struct { + LeaseOptions + + // InternalData is JSON-encodable data that is stored with the auth struct. + // This will be sent back during a Renew/Revoke for storing internal data + // used for those operations. + InternalData map[string]interface{} `json:"internal_data" mapstructure:"internal_data" structs:"internal_data"` + + // DisplayName is a non-security sensitive identifier that is + // applicable to this Auth. It is used for logging and prefixing + // of dynamic secrets. For example, DisplayName may be "armon" for + // the github credential backend. If the client token is used to + // generate a SQL credential, the user may be "github-armon-uuid". + // This is to help identify the source without using audit tables. + DisplayName string `json:"display_name" mapstructure:"display_name" structs:"display_name"` + + // Policies is the list of policies that the authenticated user + // is associated with. + Policies []string `json:"policies" mapstructure:"policies" structs:"policies"` + + // TokenPolicies and IdentityPolicies break down the list in Policies to + // help determine where a policy was sourced + TokenPolicies []string `json:"token_policies" mapstructure:"token_policies" structs:"token_policies"` + IdentityPolicies []string `json:"identity_policies" mapstructure:"identity_policies" structs:"identity_policies"` + + // ExternalNamespacePolicies represent the policies authorized from + // different namespaces indexed by respective namespace identifiers + ExternalNamespacePolicies map[string][]string `json:"external_namespace_policies" mapstructure:"external_namespace_policies" structs:"external_namespace_policies"` + + // Indicates that the default policy should not be added by core when + // creating a token. The default policy will still be added if it's + // explicitly defined. + NoDefaultPolicy bool `json:"no_default_policy" mapstructure:"no_default_policy" structs:"no_default_policy"` + + // Metadata is used to attach arbitrary string-type metadata to + // an authenticated user. This metadata will be outputted into the + // audit log. + Metadata map[string]string `json:"metadata" mapstructure:"metadata" structs:"metadata"` + + // ClientToken is the token that is generated for the authentication. + // This will be filled in by Vault core when an auth structure is + // returned. Setting this manually will have no effect. + ClientToken string `json:"client_token" mapstructure:"client_token" structs:"client_token"` + + // Accessor is the identifier for the ClientToken. This can be used + // to perform management functionalities (especially revocation) when + // ClientToken in the audit logs are obfuscated. Accessor can be used + // to revoke a ClientToken and to lookup the capabilities of the ClientToken, + // both without actually knowing the ClientToken. + Accessor string `json:"accessor" mapstructure:"accessor" structs:"accessor"` + + // Period indicates that the token generated using this Auth object + // should never expire. The token should be renewed within the duration + // specified by this period. + Period time.Duration `json:"period" mapstructure:"period" structs:"period"` + + // ExplicitMaxTTL is the max TTL that constrains periodic tokens. For normal + // tokens, this value is constrained by the configured max ttl. 
+	ExplicitMaxTTL time.Duration `json:"explicit_max_ttl" mapstructure:"explicit_max_ttl" structs:"explicit_max_ttl"`
+
+	// NumUses is the number of allowed uses of the issued token
+	NumUses int `json:"num_uses" mapstructure:"num_uses" structs:"num_uses"`
+
+	// EntityID is the identifier of the entity in the identity store to which
+	// the identity of the authenticating client belongs.
+	EntityID string `json:"entity_id" mapstructure:"entity_id" structs:"entity_id"`
+
+	// Alias is the information about the authenticated client returned by
+	// the auth backend
+	Alias *Alias `json:"alias" mapstructure:"alias" structs:"alias"`
+
+	// GroupAliases are the informational mappings of external groups which an
+	// authenticated user belongs to. This is used to check if there are
+	// groups mapped to the group aliases in the identity store. For all the
+	// matching groups, the entity ID of the user will be added.
+	GroupAliases []*Alias `json:"group_aliases" mapstructure:"group_aliases" structs:"group_aliases"`
+
+	// BoundCIDRs is the set of CIDRs that this token can be used with
+	BoundCIDRs []*sockaddr.SockAddrMarshaler `json:"bound_cidrs"`
+
+	// CreationPath is a path that the backend can return to use in the lease.
+	// This is currently only supported for the token store where roles may
+	// change the perceived path of the lease, even though they don't change
+	// the request path itself.
+	CreationPath string `json:"creation_path"`
+
+	// TokenType is the type of token being requested
+	TokenType TokenType `json:"token_type"`
+
+	// Orphan is set if the token does not have a parent
+	Orphan bool `json:"orphan"`
+
+	// PolicyResults is the set of policies that grant the token access to the
+	// requesting path.
+	PolicyResults *PolicyResults `json:"policy_results"`
+
+	// MFARequirement, if set, describes the MFA constraints that must be
+	// satisfied for the login to complete.
+	MFARequirement *MFARequirement `json:"mfa_requirement"`
+
+	// EntityCreated is set to true if an entity is created as part of a login request
+	EntityCreated bool `json:"entity_created"`
+}
+
+func (a *Auth) GoString() string {
+	return fmt.Sprintf("*%#v", *a)
+}
+
+type PolicyResults struct {
+	Allowed          bool         `json:"allowed"`
+	GrantingPolicies []PolicyInfo `json:"granting_policies"`
+}
+
+type PolicyInfo struct {
+	Name        string `json:"name"`
+	NamespaceId string `json:"namespace_id"`
+	Type        string `json:"type"`
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/connection.go b/vendor/github.com/hashicorp/vault/sdk/logical/connection.go
new file mode 100644
index 00000000000..5be86307707
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/connection.go
@@ -0,0 +1,18 @@
+package logical
+
+import (
+	"crypto/tls"
+)
+
+// Connection represents the connection information for a request. This
+// is present on the Request structure for credential backends.
+type Connection struct {
+	// RemoteAddr is the network address that sent the request.
+	RemoteAddr string `json:"remote_addr"`
+
+	// RemotePort is the network port that sent the request.
+	RemotePort int `json:"remote_port"`
+
+	// ConnState is the TLS connection state if applicable.
+	ConnState *tls.ConnectionState `sentinel:""`
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/controlgroup.go b/vendor/github.com/hashicorp/vault/sdk/logical/controlgroup.go
new file mode 100644
index 00000000000..2ed1b07688d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/controlgroup.go
@@ -0,0 +1,17 @@
+package logical
+
+import (
+	"time"
+)
+
+type ControlGroup struct {
+	Authorizations []*Authz  `json:"authorizations"`
+	RequestTime    time.Time `json:"request_time"`
+	Approved       bool      `json:"approved"`
+	NamespaceID    string    `json:"namespace_id"`
+}
+
+type Authz struct {
+	Token             string    `json:"token"`
+	AuthorizationTime time.Time `json:"authorization_time"`
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/error.go b/vendor/github.com/hashicorp/vault/sdk/logical/error.go
new file mode 100644
index 00000000000..68c8e137320
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/error.go
@@ -0,0 +1,122 @@
+package logical
+
+import "errors"
+
+var (
+	// ErrUnsupportedOperation is returned if the operation is not supported
+	// by the logical backend.
+	ErrUnsupportedOperation = errors.New("unsupported operation")
+
+	// ErrUnsupportedPath is returned if the path is not supported
+	// by the logical backend.
+	ErrUnsupportedPath = errors.New("unsupported path")
+
+	// ErrInvalidRequest is returned if the request is invalid.
+	ErrInvalidRequest = errors.New("invalid request")
+
+	// ErrPermissionDenied is returned if the client is not authorized.
+	ErrPermissionDenied = errors.New("permission denied")
+
+	// ErrInvalidCredentials is returned when the provided credentials are incorrect.
+	// This is used internally for user lockout purposes. This is not seen externally.
+	// The status code returned does not change because of this error.
+	ErrInvalidCredentials = errors.New("invalid credentials")
+
+	// ErrMultiAuthzPending is returned if the request needs more
+	// authorizations.
+	ErrMultiAuthzPending = errors.New("request needs further approval")
+
+	// ErrUpstreamRateLimited is returned when Vault receives a rate limited
+	// response from an upstream.
+	ErrUpstreamRateLimited = errors.New("upstream rate limited")
+
+	// ErrPerfStandbyPleaseForward is returned when Vault is in a state such
+	// that a perf standby cannot satisfy a request.
+	ErrPerfStandbyPleaseForward = errors.New("please forward to the active node")
+
+	// ErrLeaseCountQuotaExceeded is returned when a request is rejected due to a lease
+	// count quota being exceeded.
+	ErrLeaseCountQuotaExceeded = errors.New("lease count quota exceeded")
+
+	// ErrRateLimitQuotaExceeded is returned when a request is rejected due to a
+	// rate limit quota being exceeded.
+	ErrRateLimitQuotaExceeded = errors.New("rate limit quota exceeded")
+
+	// ErrUnrecoverable is returned when a request fails due to something that
+	// is likely to require manual intervention. This is a generic form of an
+	// unrecoverable error.
+	// e.g.: misconfigured or disconnected storage backend.
+	ErrUnrecoverable = errors.New("unrecoverable error")
+
+	// ErrMissingRequiredState is returned when a request can't be satisfied
+	// with the data in the local node's storage, based on the provided
+	// X-Vault-Index request header.
+	ErrMissingRequiredState = errors.New("required index state not present")
+
+	// ErrPathFunctionalityRemoved indicates that the requested path used to
+	// serve a purpose in older versions, but the functionality has now been
+	// removed.
+	ErrPathFunctionalityRemoved = errors.New("functionality on this path has been removed")
+)
+
+type HTTPCodedError interface {
+	Error() string
+	Code() int
+}
+
+func CodedError(status int, msg string) HTTPCodedError {
+	return &codedError{
+		Status:  status,
+		Message: msg,
+	}
+}
+
+var _ HTTPCodedError = (*codedError)(nil)
+
+type codedError struct {
+	Status  int
+	Message string
+}
+
+func (e *codedError) Error() string {
+	return e.Message
+}
+
+func (e *codedError) Code() int {
+	return e.Status
+}
+
+// StatusBadRequest identifies user input errors. This is helpful in responding
+// with the appropriate status codes to clients from the HTTP endpoints.
+type StatusBadRequest struct {
+	Err string
+}
+
+// Error implements the error interface.
+func (s *StatusBadRequest) Error() string {
+	return s.Err
+}
+
+// ReplicationCodedError is a new type declared to not cause potential
+// compatibility problems if the logic around CodedError changes; in particular
+// for logical request paths it is basically ignored, and changing that
+// behavior might cause unforeseen issues.
+type ReplicationCodedError struct {
+	Msg  string
+	Code int
+}
+
+func (r *ReplicationCodedError) Error() string {
+	return r.Msg
+}
+
+type KeyNotFoundError struct {
+	Err error
+}
+
+func (e *KeyNotFoundError) WrappedErrors() []error {
+	return []error{e.Err}
+}
+
+func (e *KeyNotFoundError) Error() string {
+	return e.Err.Error()
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/identity.pb.go b/vendor/github.com/hashicorp/vault/sdk/logical/identity.pb.go
new file mode 100644
index 00000000000..18af6e6828c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/identity.pb.go
@@ -0,0 +1,709 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.28.1
+// 	protoc        v3.21.7
+// source: sdk/logical/identity.proto
+
+package logical
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Entity struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// ID is the unique identifier for the entity
+	ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	// Name is the human-friendly unique identifier for the entity
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	// Aliases contains the alias mappings for the given entity
+	Aliases []*Alias `protobuf:"bytes,3,rep,name=aliases,proto3" json:"aliases,omitempty"`
+	// Metadata represents the custom data tied to this entity
+	Metadata map[string]string `protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Disabled is true if the entity is disabled.
+	Disabled bool `protobuf:"varint,5,opt,name=disabled,proto3" json:"disabled,omitempty"`
+	// NamespaceID is the identifier of the namespace to which this entity
+	// belongs.
+ NamespaceID string `protobuf:"bytes,6,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"` +} + +func (x *Entity) Reset() { + *x = Entity{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_logical_identity_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Entity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Entity) ProtoMessage() {} + +func (x *Entity) ProtoReflect() protoreflect.Message { + mi := &file_sdk_logical_identity_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Entity.ProtoReflect.Descriptor instead. +func (*Entity) Descriptor() ([]byte, []int) { + return file_sdk_logical_identity_proto_rawDescGZIP(), []int{0} +} + +func (x *Entity) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *Entity) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Entity) GetAliases() []*Alias { + if x != nil { + return x.Aliases + } + return nil +} + +func (x *Entity) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Entity) GetDisabled() bool { + if x != nil { + return x.Disabled + } + return false +} + +func (x *Entity) GetNamespaceID() string { + if x != nil { + return x.NamespaceID + } + return "" +} + +type Alias struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // MountType is the backend mount's type to which this identity belongs + MountType string `protobuf:"bytes,1,opt,name=mount_type,json=mountType,proto3" json:"mount_type,omitempty"` + // MountAccessor is the identifier of the mount entry to which this + // identity belongs + MountAccessor string `protobuf:"bytes,2,opt,name=mount_accessor,json=mountAccessor,proto3" json:"mount_accessor,omitempty"` + // Name is the identifier of this identity in its authentication source + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Metadata represents the custom data tied to this alias. Fields added + // to it should have a low rate of change (or no change) because each + // change incurs a storage write, so quickly-changing fields can have + // a significant performance impact at scale. See the SDK's + // "aliasmetadata" package for a helper that eases and standardizes + // using this safely. + Metadata map[string]string `protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // ID is the unique identifier for the alias + ID string `protobuf:"bytes,5,opt,name=ID,proto3" json:"ID,omitempty"` + // NamespaceID is the identifier of the namespace to which this alias + // belongs. + NamespaceID string `protobuf:"bytes,6,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"` + // Custom Metadata represents the custom data tied to this alias + CustomMetadata map[string]string `protobuf:"bytes,7,rep,name=custom_metadata,json=customMetadata,proto3" json:"custom_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Local indicates if the alias only belongs to the cluster where it was + // created. 
If true, the alias will be stored in a location that are ignored + // by the performance replication subsystem. + Local bool `protobuf:"varint,8,opt,name=local,proto3" json:"local,omitempty"` +} + +func (x *Alias) Reset() { + *x = Alias{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_logical_identity_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Alias) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Alias) ProtoMessage() {} + +func (x *Alias) ProtoReflect() protoreflect.Message { + mi := &file_sdk_logical_identity_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Alias.ProtoReflect.Descriptor instead. +func (*Alias) Descriptor() ([]byte, []int) { + return file_sdk_logical_identity_proto_rawDescGZIP(), []int{1} +} + +func (x *Alias) GetMountType() string { + if x != nil { + return x.MountType + } + return "" +} + +func (x *Alias) GetMountAccessor() string { + if x != nil { + return x.MountAccessor + } + return "" +} + +func (x *Alias) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Alias) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Alias) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *Alias) GetNamespaceID() string { + if x != nil { + return x.NamespaceID + } + return "" +} + +func (x *Alias) GetCustomMetadata() map[string]string { + if x != nil { + return x.CustomMetadata + } + return nil +} + +func (x *Alias) GetLocal() bool { + if x != nil { + return x.Local + } + return false +} + +type Group struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // ID is the unique identifier for the group + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + // Name is the human-friendly unique identifier for the group + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Metadata represents the custom data tied to this group + Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // NamespaceID is the identifier of the namespace to which this group + // belongs to. + NamespaceID string `protobuf:"bytes,4,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"` +} + +func (x *Group) Reset() { + *x = Group{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_logical_identity_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Group) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Group) ProtoMessage() {} + +func (x *Group) ProtoReflect() protoreflect.Message { + mi := &file_sdk_logical_identity_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Group.ProtoReflect.Descriptor instead. 
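The generated getters above return zero values on nil receivers, so callers can chain lookups without nil checks; a small illustrative sketch, with e possibly nil by design:

// Sketch only: e.GetNamespaceID() and e.GetName() are safe even when e is nil.
func exampleEntityLabel(e *Entity) string {
	return e.GetNamespaceID() + "/" + e.GetName()
}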
+func (*Group) Descriptor() ([]byte, []int) { + return file_sdk_logical_identity_proto_rawDescGZIP(), []int{2} +} + +func (x *Group) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *Group) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Group) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Group) GetNamespaceID() string { + if x != nil { + return x.NamespaceID + } + return "" +} + +type MFAMethodID struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + UsesPasscode bool `protobuf:"varint,3,opt,name=uses_passcode,json=usesPasscode,proto3" json:"uses_passcode,omitempty"` +} + +func (x *MFAMethodID) Reset() { + *x = MFAMethodID{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_logical_identity_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MFAMethodID) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MFAMethodID) ProtoMessage() {} + +func (x *MFAMethodID) ProtoReflect() protoreflect.Message { + mi := &file_sdk_logical_identity_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MFAMethodID.ProtoReflect.Descriptor instead. +func (*MFAMethodID) Descriptor() ([]byte, []int) { + return file_sdk_logical_identity_proto_rawDescGZIP(), []int{3} +} + +func (x *MFAMethodID) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *MFAMethodID) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *MFAMethodID) GetUsesPasscode() bool { + if x != nil { + return x.UsesPasscode + } + return false +} + +type MFAConstraintAny struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Any []*MFAMethodID `protobuf:"bytes,1,rep,name=any,proto3" json:"any,omitempty"` +} + +func (x *MFAConstraintAny) Reset() { + *x = MFAConstraintAny{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_logical_identity_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MFAConstraintAny) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MFAConstraintAny) ProtoMessage() {} + +func (x *MFAConstraintAny) ProtoReflect() protoreflect.Message { + mi := &file_sdk_logical_identity_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MFAConstraintAny.ProtoReflect.Descriptor instead. 
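A hypothetical sketch of assembling the MFARequirement message defined just below; the constraint name "default" is a placeholder, and any one method listed under a constraint satisfies it:

// Illustrative assembly of an MFARequirement; not part of the vendored file.
func exampleMFARequirement(reqID string, methods []*MFAMethodID) *MFARequirement {
	return &MFARequirement{
		MFARequestID: reqID,
		MFAConstraints: map[string]*MFAConstraintAny{
			"default": {Any: methods}, // "default" is a placeholder constraint name
		},
	}
}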
+func (*MFAConstraintAny) Descriptor() ([]byte, []int) { + return file_sdk_logical_identity_proto_rawDescGZIP(), []int{4} +} + +func (x *MFAConstraintAny) GetAny() []*MFAMethodID { + if x != nil { + return x.Any + } + return nil +} + +type MFARequirement struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MFARequestID string `protobuf:"bytes,1,opt,name=mfa_request_id,json=mfaRequestId,proto3" json:"mfa_request_id,omitempty"` + MFAConstraints map[string]*MFAConstraintAny `protobuf:"bytes,2,rep,name=mfa_constraints,json=mfaConstraints,proto3" json:"mfa_constraints,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *MFARequirement) Reset() { + *x = MFARequirement{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_logical_identity_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MFARequirement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MFARequirement) ProtoMessage() {} + +func (x *MFARequirement) ProtoReflect() protoreflect.Message { + mi := &file_sdk_logical_identity_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MFARequirement.ProtoReflect.Descriptor instead. +func (*MFARequirement) Descriptor() ([]byte, []int) { + return file_sdk_logical_identity_proto_rawDescGZIP(), []int{5} +} + +func (x *MFARequirement) GetMFARequestID() string { + if x != nil { + return x.MFARequestID + } + return "" +} + +func (x *MFARequirement) GetMFAConstraints() map[string]*MFAConstraintAny { + if x != nil { + return x.MFAConstraints + } + return nil +} + +var File_sdk_logical_identity_proto protoreflect.FileDescriptor + +var file_sdk_logical_identity_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6c, 0x6f, + 0x67, 0x69, 0x63, 0x61, 0x6c, 0x22, 0x8d, 0x02, 0x0a, 0x06, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x39, + 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 
0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb1, 0x03, 0x0a, 0x05, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, + 0x1d, 0x0a, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, + 0x0a, 0x0e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x08, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6c, 0x6f, + 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x49, 0x44, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x4b, 0x0a, 0x0f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x2e, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x41, 0x0a, 0x13, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc5, 0x01, 0x0a, 0x05, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6c, 0x6f, 0x67, 0x69, + 0x63, 0x61, 0x6c, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 
0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x49, 0x64, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x56, 0x0a, 0x0b, 0x4d, 0x46, 0x41, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x49, 0x44, + 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x02, 0x69, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x65, 0x73, 0x5f, 0x70, 0x61, 0x73, + 0x73, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x75, 0x73, 0x65, + 0x73, 0x50, 0x61, 0x73, 0x73, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x3a, 0x0a, 0x10, 0x4d, 0x46, 0x41, + 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x79, 0x12, 0x26, 0x0a, + 0x03, 0x61, 0x6e, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6c, 0x6f, 0x67, + 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x49, 0x44, + 0x52, 0x03, 0x61, 0x6e, 0x79, 0x22, 0xea, 0x01, 0x0a, 0x0e, 0x4d, 0x46, 0x41, 0x52, 0x65, 0x71, + 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x66, 0x61, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x6d, 0x66, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x54, + 0x0a, 0x0f, 0x6d, 0x66, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, + 0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x4d, 0x66, 0x61, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x6d, 0x66, 0x61, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, + 0x69, 0x6e, 0x74, 0x73, 0x1a, 0x5c, 0x0a, 0x13, 0x4d, 0x66, 0x61, 0x43, 0x6f, 0x6e, 0x73, 0x74, + 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2f, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6c, + 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, + 0x61, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x42, 0x28, 0x5a, 0x26, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, + 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sdk_logical_identity_proto_rawDescOnce sync.Once + file_sdk_logical_identity_proto_rawDescData = file_sdk_logical_identity_proto_rawDesc +) + +func 
file_sdk_logical_identity_proto_rawDescGZIP() []byte { + file_sdk_logical_identity_proto_rawDescOnce.Do(func() { + file_sdk_logical_identity_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_logical_identity_proto_rawDescData) + }) + return file_sdk_logical_identity_proto_rawDescData +} + +var file_sdk_logical_identity_proto_msgTypes = make([]protoimpl.MessageInfo, 11) +var file_sdk_logical_identity_proto_goTypes = []interface{}{ + (*Entity)(nil), // 0: logical.Entity + (*Alias)(nil), // 1: logical.Alias + (*Group)(nil), // 2: logical.Group + (*MFAMethodID)(nil), // 3: logical.MFAMethodID + (*MFAConstraintAny)(nil), // 4: logical.MFAConstraintAny + (*MFARequirement)(nil), // 5: logical.MFARequirement + nil, // 6: logical.Entity.MetadataEntry + nil, // 7: logical.Alias.MetadataEntry + nil, // 8: logical.Alias.CustomMetadataEntry + nil, // 9: logical.Group.MetadataEntry + nil, // 10: logical.MFARequirement.MFAConstraintsEntry +} +var file_sdk_logical_identity_proto_depIDxs = []int32{ + 1, // 0: logical.Entity.aliases:type_name -> logical.Alias + 6, // 1: logical.Entity.metadata:type_name -> logical.Entity.MetadataEntry + 7, // 2: logical.Alias.metadata:type_name -> logical.Alias.MetadataEntry + 8, // 3: logical.Alias.custom_metadata:type_name -> logical.Alias.CustomMetadataEntry + 9, // 4: logical.Group.metadata:type_name -> logical.Group.MetadataEntry + 3, // 5: logical.MFAConstraintAny.any:type_name -> logical.MFAMethodID + 10, // 6: logical.MFARequirement.mfa_constraints:type_name -> logical.MFARequirement.MFAConstraintsEntry + 4, // 7: logical.MFARequirement.MFAConstraintsEntry.value:type_name -> logical.MFAConstraintAny + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_sdk_logical_identity_proto_init() } +func file_sdk_logical_identity_proto_init() { + if File_sdk_logical_identity_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sdk_logical_identity_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Entity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_logical_identity_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Alias); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_logical_identity_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Group); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_logical_identity_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MFAMethodID); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_logical_identity_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MFAConstraintAny); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_logical_identity_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MFARequirement); i { + 
case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sdk_logical_identity_proto_rawDesc, + NumEnums: 0, + NumMessages: 11, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sdk_logical_identity_proto_goTypes, + DependencyIndexes: file_sdk_logical_identity_proto_depIDxs, + MessageInfos: file_sdk_logical_identity_proto_msgTypes, + }.Build() + File_sdk_logical_identity_proto = out.File + file_sdk_logical_identity_proto_rawDesc = nil + file_sdk_logical_identity_proto_goTypes = nil + file_sdk_logical_identity_proto_depIDxs = nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/identity.proto b/vendor/github.com/hashicorp/vault/sdk/logical/identity.proto new file mode 100644 index 00000000000..ea2e373b18c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/identity.proto @@ -0,0 +1,91 @@ +syntax = "proto3"; + +option go_package = "github.com/hashicorp/vault/sdk/logical"; + +package logical; + +message Entity { + // ID is the unique identifier for the entity + string ID = 1; + + // Name is the human-friendly unique identifier for the entity + string name = 2; + + // Aliases contains thhe alias mappings for the given entity + repeated Alias aliases = 3; + + // Metadata represents the custom data tied to this entity + map metadata = 4; + + // Disabled is true if the entity is disabled. + bool disabled = 5; + + // NamespaceID is the identifier of the namespace to which this entity + // belongs to. + string namespace_id = 6; +} + +message Alias { + // MountType is the backend mount's type to which this identity belongs + string mount_type = 1; + + // MountAccessor is the identifier of the mount entry to which this + // identity belongs + string mount_accessor = 2; + + // Name is the identifier of this identity in its authentication source + string name = 3; + + // Metadata represents the custom data tied to this alias. Fields added + // to it should have a low rate of change (or no change) because each + // change incurs a storage write, so quickly-changing fields can have + // a significant performance impact at scale. See the SDK's + // "aliasmetadata" package for a helper that eases and standardizes + // using this safely. + map metadata = 4; + + // ID is the unique identifier for the alias + string ID = 5; + + // NamespaceID is the identifier of the namespace to which this alias + // belongs. + string namespace_id = 6; + + // Custom Metadata represents the custom data tied to this alias + map custom_metadata = 7; + + // Local indicates if the alias only belongs to the cluster where it was + // created. If true, the alias will be stored in a location that are ignored + // by the performance replication subsystem. + bool local = 8; +} + +message Group { + // ID is the unique identifier for the group + string ID = 1; + + // Name is the human-friendly unique identifier for the group + string name = 2; + + // Metadata represents the custom data tied to this group + map metadata = 3; + + // NamespaceID is the identifier of the namespace to which this group + // belongs to. 
+ string namespace_id = 4; +} + +message MFAMethodID { + string type = 1; + string id = 2; + bool uses_passcode = 3; +} + +message MFAConstraintAny { + repeated MFAMethodID any = 1; +} + +message MFARequirement { + string mfa_request_id = 1; + map mfa_constraints = 2; +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/lease.go b/vendor/github.com/hashicorp/vault/sdk/logical/lease.go new file mode 100644 index 00000000000..97bbe4f6582 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/lease.go @@ -0,0 +1,53 @@ +package logical + +import ( + "time" +) + +// LeaseOptions is an embeddable struct to capture common lease +// settings between a Secret and Auth +type LeaseOptions struct { + // TTL is the duration that this secret is valid for. Vault + // will automatically revoke it after the duration. + TTL time.Duration `json:"lease"` + + // MaxTTL is the maximum duration that this secret is valid for. + MaxTTL time.Duration `json:"max_ttl"` + + // Renewable, if true, means that this secret can be renewed. + Renewable bool `json:"renewable"` + + // Increment will be the lease increment that the user requested. + // This is only available on a Renew operation and has no effect + // when returning a response. + Increment time.Duration `json:"-"` + + // IssueTime is the time of issue for the original lease. This is + // only available on Renew and Revoke operations and has no effect when returning + // a response. It can be used to enforce maximum lease periods by + // a logical backend. + IssueTime time.Time `json:"-"` +} + +// LeaseEnabled checks if leasing is enabled +func (l *LeaseOptions) LeaseEnabled() bool { + return l.TTL > 0 +} + +// LeaseTotal is the lease duration with a guard against a negative TTL +func (l *LeaseOptions) LeaseTotal() time.Duration { + if l.TTL <= 0 { + return 0 + } + + return l.TTL +} + +// ExpirationTime computes the time until expiration including the grace period +func (l *LeaseOptions) ExpirationTime() time.Time { + var expireTime time.Time + if l.LeaseEnabled() { + expireTime = time.Now().Add(l.LeaseTotal()) + } + return expireTime +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/logical.go b/vendor/github.com/hashicorp/vault/sdk/logical/logical.go new file mode 100644 index 00000000000..601148952f0 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/logical.go @@ -0,0 +1,156 @@ +package logical + +import ( + "context" + + log "github.com/hashicorp/go-hclog" +) + +// BackendType is the type of backend that is being implemented +type BackendType uint32 + +// The these are the types of backends that can be derived from +// logical.Backend +const ( + TypeUnknown BackendType = 0 // This is also the zero-value for BackendType + TypeLogical BackendType = 1 + TypeCredential BackendType = 2 +) + +// Stringer implementation +func (b BackendType) String() string { + switch b { + case TypeLogical: + return "secret" + case TypeCredential: + return "auth" + } + + return "unknown" +} + +// Backend interface must be implemented to be "mountable" at +// a given path. Requests flow through a router which has various mount +// points that flow to a logical backend. The logic of each backend is flexible, +// and this is what allows materialized keys to function. There can be specialized +// logical backends for various upstreams (Consul, PostgreSQL, MySQL, etc) that can +// interact with remote APIs to generate keys dynamically. 
This interface also +// allows for a "procfs" like interaction, as internal state can be exposed by +// acting like a logical backend and being mounted. +type Backend interface { + // Initialize is used to initialize a plugin after it has been mounted. + Initialize(context.Context, *InitializationRequest) error + + // HandleRequest is used to handle a request and generate a response. + // The backends must check the operation type and handle appropriately. + HandleRequest(context.Context, *Request) (*Response, error) + + // SpecialPaths is a list of paths that are special in some way. + // See PathType for the types of special paths. The key is the type + // of the special path, and the value is a list of paths for this type. + // This is not a regular expression but is an exact match. If the path + // ends in '*' then it is a prefix-based match. The '*' can only appear + // at the end. + SpecialPaths() *Paths + + // System provides an interface to access certain system configuration + // information, such as globally configured default and max lease TTLs. + System() SystemView + + // Logger provides an interface to access the underlying logger. This + // is useful when a struct embeds a Backend-implemented struct that + // contains a private instance of logger. + Logger() log.Logger + + // HandleExistenceCheck is used to handle a request and generate a response + // indicating whether the given path exists or not; this is used to + // understand whether the request must have a Create or Update capability + // ACL applied. The first bool indicates whether an existence check + // function was found for the backend; the second indicates whether, if an + // existence check function was found, the item exists or not. + HandleExistenceCheck(context.Context, *Request) (bool, bool, error) + + // Cleanup is invoked during an unmount of a backend to allow it to + // handle any cleanup like connection closing or releasing of file handles. + Cleanup(context.Context) + + // InvalidateKey may be invoked when an object is modified that belongs + // to the backend. The backend can use this to clear any caches or reset + // internal state as needed. + InvalidateKey(context.Context, string) + + // Setup is used to set up the backend based on the provided backend + // configuration. + Setup(context.Context, *BackendConfig) error + + // Type returns the BackendType for the particular backend + Type() BackendType +} + +// BackendConfig is provided to the factory to initialize the backend +type BackendConfig struct { + // View should not be stored, and should only be used for initialization + StorageView Storage + + // The backend should use this logger. The log should not contain any secrets. + Logger log.Logger + + // System provides a view into a subset of safe system information that + // is useful for backends, such as the default/max lease TTLs + System SystemView + + // BackendUUID is a unique identifier provided to this backend. It's useful + // when a backend needs a consistent and unique string without using storage. + BackendUUID string + + // Config is the opaque user configuration provided when mounting + Config map[string]string +} + +// Factory is the factory function to create a logical backend. +type Factory func(context.Context, *BackendConfig) (Backend, error) + +// Paths is the structure of special paths that is used for SpecialPaths. 
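An illustrative Paths value, with hypothetical path names, showing the matching rules spelled out in the field comments below: a '*' suffix gives a prefix match, '+' matches exactly one path segment, and a trailing '/' on SealWrapStorage entries marks a prefix.

// Hypothetical SpecialPaths return value; every path name is an assumption.
var examplePaths = &Paths{
	Root:            []string{"config"},                              // root token required
	Unauthenticated: []string{"login", "cert/*", "oauth/+/callback"}, // exact, prefix, wildcard
	LocalStorage:    []string{"node-local/"},                         // not replicated across perf clusters
	SealWrapStorage: []string{"keys/"},                               // trailing '/': prefix match
}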
+type Paths struct { + // Root are the API paths that require a root token to access + Root []string + + // Unauthenticated are the API paths that can be accessed without any auth. + // These can't be regular expressions, it is either exact match, a prefix + // match and/or a wildcard match. For prefix match, append '*' as a suffix. + // For a wildcard match, use '+' in the segment to match any identifier + // (e.g. 'foo/+/bar'). Note that '+' can't be adjacent to a non-slash. + Unauthenticated []string + + // LocalStorage are storage paths (prefixes) that are local to this cluster; + // this indicates that these paths should not be replicated across performance clusters + // (DR replication is unaffected). + LocalStorage []string + + // SealWrapStorage are storage paths that, when using a capable seal, + // should be seal wrapped with extra encryption. It is exact matching + // unless it ends with '/' in which case it will be treated as a prefix. + SealWrapStorage []string +} + +type Auditor interface { + AuditRequest(ctx context.Context, input *LogInput) error + AuditResponse(ctx context.Context, input *LogInput) error +} + +// Externaler allows us to check if a backend is running externally (i.e., over GRPC) +type Externaler interface { + IsExternal() bool +} + +type PluginVersion struct { + Version string +} + +// PluginVersioner is an optional interface to return version info. +type PluginVersioner interface { + // PluginVersion returns the version for the backend + PluginVersion() PluginVersion +} + +var EmptyPluginVersion = PluginVersion{""} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/logical_storage.go b/vendor/github.com/hashicorp/vault/sdk/logical/logical_storage.go new file mode 100644 index 00000000000..16b85cd797e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/logical_storage.go @@ -0,0 +1,52 @@ +package logical + +import ( + "context" + + "github.com/hashicorp/vault/sdk/physical" +) + +type LogicalStorage struct { + underlying physical.Backend +} + +func (s *LogicalStorage) Get(ctx context.Context, key string) (*StorageEntry, error) { + entry, err := s.underlying.Get(ctx, key) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + return &StorageEntry{ + Key: entry.Key, + Value: entry.Value, + SealWrap: entry.SealWrap, + }, nil +} + +func (s *LogicalStorage) Put(ctx context.Context, entry *StorageEntry) error { + return s.underlying.Put(ctx, &physical.Entry{ + Key: entry.Key, + Value: entry.Value, + SealWrap: entry.SealWrap, + }) +} + +func (s *LogicalStorage) Delete(ctx context.Context, key string) error { + return s.underlying.Delete(ctx, key) +} + +func (s *LogicalStorage) List(ctx context.Context, prefix string) ([]string, error) { + return s.underlying.List(ctx, prefix) +} + +func (s *LogicalStorage) Underlying() physical.Backend { + return s.underlying +} + +func NewLogicalStorage(underlying physical.Backend) *LogicalStorage { + return &LogicalStorage{ + underlying: underlying, + } +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/managed_key.go b/vendor/github.com/hashicorp/vault/sdk/logical/managed_key.go new file mode 100644 index 00000000000..e892c9cce94 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/managed_key.go @@ -0,0 +1,97 @@ +package logical + +import ( + "context" + "crypto" + "crypto/cipher" + "io" +) + +type KeyUsage int + +const ( + KeyUsageEncrypt KeyUsage = 1 + iota + KeyUsageDecrypt + KeyUsageSign + 
+	KeyUsageVerify
+	KeyUsageWrap
+	KeyUsageUnwrap
+)
+
+type ManagedKey interface {
+	// Name is a human-readable identifier for a managed key that may be
+	// changed or renamed. Use UUID if a long-term consistent identifier is
+	// needed.
+	Name() string
+	// UUID is a unique identifier for a managed key that is guaranteed to remain
+	// consistent even if a key is migrated or renamed.
+	UUID() string
+	// Present returns true if the key is established in the KMS. This may return false if, for example,
+	// an HSM library is not configured on all cluster nodes.
+	Present(ctx context.Context) (bool, error)
+
+	// AllowsAll returns true if all the requested usages are supported by the managed key.
+	AllowsAll(usages []KeyUsage) bool
+}
+
+type (
+	ManagedKeyConsumer           func(context.Context, ManagedKey) error
+	ManagedSigningKeyConsumer    func(context.Context, ManagedSigningKey) error
+	ManagedEncryptingKeyConsumer func(context.Context, ManagedEncryptingKey) error
+)
+
+type ManagedKeySystemView interface {
+	// WithManagedKeyByName retrieves an instantiated managed key for consumption by the given function. The
+	// provided key can only be used within the scope of that function call.
+	WithManagedKeyByName(ctx context.Context, keyName, backendUUID string, f ManagedKeyConsumer) error
+	// WithManagedKeyByUUID retrieves an instantiated managed key for consumption by the given function. The
+	// provided key can only be used within the scope of that function call.
+	WithManagedKeyByUUID(ctx context.Context, keyUuid, backendUUID string, f ManagedKeyConsumer) error
+
+	// WithManagedSigningKeyByName retrieves an instantiated managed signing key for consumption by the given function,
+	// with the same semantics as WithManagedKeyByName.
+	WithManagedSigningKeyByName(ctx context.Context, keyName, backendUUID string, f ManagedSigningKeyConsumer) error
+	// WithManagedSigningKeyByUUID retrieves an instantiated managed signing key for consumption by the given function,
+	// with the same semantics as WithManagedKeyByUUID.
+	WithManagedSigningKeyByUUID(ctx context.Context, keyUuid, backendUUID string, f ManagedSigningKeyConsumer) error
+	// WithManagedEncryptingKeyByName retrieves an instantiated managed encrypting key for consumption by the given
+	// function, with the same semantics as WithManagedKeyByName.
+	WithManagedEncryptingKeyByName(ctx context.Context, keyName, backendUUID string, f ManagedEncryptingKeyConsumer) error
+	// WithManagedEncryptingKeyByUUID retrieves an instantiated managed encrypting key for consumption by the given
+	// function, with the same semantics as WithManagedKeyByUUID.
+	WithManagedEncryptingKeyByUUID(ctx context.Context, keyUuid, backendUUID string, f ManagedEncryptingKeyConsumer) error
+}
+
+type ManagedAsymmetricKey interface {
+	ManagedKey
+	GetPublicKey(ctx context.Context) (crypto.PublicKey, error)
+}
+
+type ManagedKeyLifecycle interface {
+	// GenerateKey generates a key in the KMS if it didn't yet exist, returning the id.
+	// If it already existed, returns the existing id. KMSKey's key material is ignored if present.
+	GenerateKey(ctx context.Context) (string, error)
+}
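A sketch of how a backend might consume these interfaces (illustrative only, not vendored code): the key name "my-signing-key" and the helper function are hypothetical, and ManagedSigningKey is declared immediately below.

package example

import (
	"context"
	"crypto"
	"crypto/rand"
	"fmt"

	"github.com/hashicorp/vault/sdk/logical"
)

// signWithManagedKey signs a SHA-256 digest with a managed key without letting
// the key handle escape the scope of the callback.
func signWithManagedKey(ctx context.Context, sys logical.ManagedKeySystemView, backendUUID string, digest []byte) ([]byte, error) {
	var sig []byte
	err := sys.WithManagedSigningKeyByName(ctx, "my-signing-key", backendUUID,
		func(ctx context.Context, key logical.ManagedSigningKey) error {
			if !key.AllowsAll([]logical.KeyUsage{logical.KeyUsageSign}) {
				return fmt.Errorf("key %q does not permit signing", key.Name())
			}
			var err error
			// rand.Reader may be ignored by implementations with their own RNG.
			sig, err = key.Sign(ctx, digest, rand.Reader, crypto.SHA256)
			return err
		})
	return sig, err
}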
+type ManagedSigningKey interface {
+	ManagedAsymmetricKey
+
+	// Sign returns a digital signature of the provided value. The SignerOpts param must provide the hash function
+	// that generated the value (if any).
+	// The optional randomSource specifies the source of random values and may be ignored by the implementation
+	// (such as on HSMs with their own internal RNG).
+	Sign(ctx context.Context, value []byte, randomSource io.Reader, opts crypto.SignerOpts) ([]byte, error)
+
+	// Verify verifies the provided signature against the value. The SignerOpts param must provide the hash function
+	// that generated the value (if any).
+	// If true is returned, the signature is correct; false otherwise.
+	Verify(ctx context.Context, signature, value []byte, opts crypto.SignerOpts) (bool, error)
+
+	// GetSigner returns an implementation of crypto.Signer backed by the managed key. This should be called
+	// as needed so as to use per-request contexts.
+	GetSigner(context.Context) (crypto.Signer, error)
+}
+
+type ManagedEncryptingKey interface {
+	ManagedKey
+	GetAEAD(iv []byte) (cipher.AEAD, error)
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/plugin.pb.go b/vendor/github.com/hashicorp/vault/sdk/logical/plugin.pb.go
new file mode 100644
index 00000000000..9be723e14be
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/plugin.pb.go
@@ -0,0 +1,171 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.28.1
+// 	protoc        v3.21.7
+// source: sdk/logical/plugin.proto
+
+package logical
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type PluginEnvironment struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// VaultVersion is the version of the Vault server
+	VaultVersion string `protobuf:"bytes,1,opt,name=vault_version,json=vaultVersion,proto3" json:"vault_version,omitempty"`
+	// VaultVersionPrerelease is the prerelease information of the Vault server
+	VaultVersionPrerelease string `protobuf:"bytes,2,opt,name=vault_version_prerelease,json=vaultVersionPrerelease,proto3" json:"vault_version_prerelease,omitempty"`
+	// VaultVersionMetadata is the version metadata of the Vault server
+	VaultVersionMetadata string `protobuf:"bytes,3,opt,name=vault_version_metadata,json=vaultVersionMetadata,proto3" json:"vault_version_metadata,omitempty"`
+}
+
+func (x *PluginEnvironment) Reset() {
+	*x = PluginEnvironment{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_sdk_logical_plugin_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *PluginEnvironment) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PluginEnvironment) ProtoMessage() {}
+
+func (x *PluginEnvironment) ProtoReflect() protoreflect.Message {
+	mi := &file_sdk_logical_plugin_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use PluginEnvironment.ProtoReflect.Descriptor instead.
+func (*PluginEnvironment) Descriptor() ([]byte, []int) { + return file_sdk_logical_plugin_proto_rawDescGZIP(), []int{0} +} + +func (x *PluginEnvironment) GetVaultVersion() string { + if x != nil { + return x.VaultVersion + } + return "" +} + +func (x *PluginEnvironment) GetVaultVersionPrerelease() string { + if x != nil { + return x.VaultVersionPrerelease + } + return "" +} + +func (x *PluginEnvironment) GetVaultVersionMetadata() string { + if x != nil { + return x.VaultVersionMetadata + } + return "" +} + +var File_sdk_logical_plugin_proto protoreflect.FileDescriptor + +var file_sdk_logical_plugin_proto_rawDesc = []byte{ + 0x0a, 0x18, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6c, 0x6f, 0x67, 0x69, + 0x63, 0x61, 0x6c, 0x22, 0xa8, 0x01, 0x0a, 0x11, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, + 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x75, + 0x6c, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x38, + 0x0a, 0x18, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, + 0x70, 0x72, 0x65, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x16, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x65, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x76, 0x61, 0x75, 0x6c, + 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x28, + 0x5a, 0x26, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, + 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, + 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sdk_logical_plugin_proto_rawDescOnce sync.Once + file_sdk_logical_plugin_proto_rawDescData = file_sdk_logical_plugin_proto_rawDesc +) + +func file_sdk_logical_plugin_proto_rawDescGZIP() []byte { + file_sdk_logical_plugin_proto_rawDescOnce.Do(func() { + file_sdk_logical_plugin_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_logical_plugin_proto_rawDescData) + }) + return file_sdk_logical_plugin_proto_rawDescData +} + +var file_sdk_logical_plugin_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_sdk_logical_plugin_proto_goTypes = []interface{}{ + (*PluginEnvironment)(nil), // 0: logical.PluginEnvironment +} +var file_sdk_logical_plugin_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_sdk_logical_plugin_proto_init() } +func file_sdk_logical_plugin_proto_init() { + if File_sdk_logical_plugin_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sdk_logical_plugin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PluginEnvironment); i { + case 0: + return &v.state + case 1: + return &v.sizeCache 
+ case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sdk_logical_plugin_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sdk_logical_plugin_proto_goTypes, + DependencyIndexes: file_sdk_logical_plugin_proto_depIdxs, + MessageInfos: file_sdk_logical_plugin_proto_msgTypes, + }.Build() + File_sdk_logical_plugin_proto = out.File + file_sdk_logical_plugin_proto_rawDesc = nil + file_sdk_logical_plugin_proto_goTypes = nil + file_sdk_logical_plugin_proto_depIdxs = nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/plugin.proto b/vendor/github.com/hashicorp/vault/sdk/logical/plugin.proto new file mode 100644 index 00000000000..f2df6c75d97 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/plugin.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +option go_package = "github.com/hashicorp/vault/sdk/logical"; + +package logical; + +message PluginEnvironment { + // VaultVersion is the version of the Vault server + string vault_version = 1; + + // VaultVersionPrerelease is the prerelease information of the Vault server + string vault_version_prerelease = 2; + + // VaultVersionMetadata is the version metadata of the Vault server + string vault_version_metadata = 3; +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/request.go b/vendor/github.com/hashicorp/vault/sdk/logical/request.go new file mode 100644 index 00000000000..d774fd176b4 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/request.go @@ -0,0 +1,394 @@ +package logical + +import ( + "context" + "fmt" + "net/http" + "strings" + "time" + + "github.com/mitchellh/copystructure" +) + +// RequestWrapInfo is a struct that stores information about desired response +// and seal wrapping behavior +type RequestWrapInfo struct { + // Setting to non-zero specifies that the response should be wrapped. + // Specifies the desired TTL of the wrapping token. + TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl" sentinel:""` + + // The format to use for the wrapped response; if not specified it's a bare + // token + Format string `json:"format" structs:"format" mapstructure:"format" sentinel:""` + + // A flag to conforming backends that data for a given request should be + // seal wrapped + SealWrap bool `json:"seal_wrap" structs:"seal_wrap" mapstructure:"seal_wrap" sentinel:""` +} + +func (r *RequestWrapInfo) SentinelGet(key string) (interface{}, error) { + if r == nil { + return nil, nil + } + switch key { + case "ttl": + return r.TTL, nil + case "ttl_seconds": + return int64(r.TTL.Seconds()), nil + } + + return nil, nil +} + +func (r *RequestWrapInfo) SentinelKeys() []string { + return []string{ + "ttl", + "ttl_seconds", + } +} + +type ClientTokenSource uint32 + +const ( + NoClientToken ClientTokenSource = iota + ClientTokenFromVaultHeader + ClientTokenFromAuthzHeader +) + +type WALState struct { + ClusterID string + LocalIndex uint64 + ReplicatedIndex uint64 +} + +const indexStateCtxKey = "index_state" + +// IndexStateContext returns a context with an added value holding the index +// state that should be populated on writes. 
+func IndexStateContext(ctx context.Context, state *WALState) context.Context {
+	return context.WithValue(ctx, indexStateCtxKey, state)
+}
+
+// IndexStateFromContext is a helper to look up if the provided context contains
+// an index state pointer.
+func IndexStateFromContext(ctx context.Context) *WALState {
+	s, ok := ctx.Value(indexStateCtxKey).(*WALState)
+	if !ok {
+		return nil
+	}
+	return s
+}
+
+// Request is a struct that stores the parameters and context of a request
+// being made to Vault. It is used to abstract the details of the higher-level
+// request protocol from the handlers.
+//
+// Note: Many of these have Sentinel disabled because they are values populated
+// by the router after policy checks; the token namespace would be the right
+// place to access them via Sentinel.
+type Request struct {
+	// ID is the UUID associated with each request
+	ID string `json:"id" structs:"id" mapstructure:"id" sentinel:""`
+
+	// If set, the name given to the replication secondary where this request
+	// originated
+	ReplicationCluster string `json:"replication_cluster" structs:"replication_cluster" mapstructure:"replication_cluster" sentinel:""`
+
+	// Operation is the requested operation type
+	Operation Operation `json:"operation" structs:"operation" mapstructure:"operation"`
+
+	// Path is the full path of the request
+	Path string `json:"path" structs:"path" mapstructure:"path" sentinel:""`
+
+	// Request data is an opaque map that must have string keys.
+	Data map[string]interface{} `json:"map" structs:"data" mapstructure:"data"`
+
+	// Storage can be used to durably store and retrieve state.
+	Storage Storage `json:"-" sentinel:""`
+
+	// Secret will be non-nil only for Revoke and Renew operations
+	// to represent the secret that was returned prior.
+	Secret *Secret `json:"secret" structs:"secret" mapstructure:"secret" sentinel:""`
+
+	// Auth will be non-nil only for Renew operations
+	// to represent the auth that was returned prior.
+	Auth *Auth `json:"auth" structs:"auth" mapstructure:"auth" sentinel:""`
+
+	// Headers will contain the http headers from the request. This value will
+	// be used in the audit broker to ensure we are auditing only the allowed
+	// headers.
+	Headers map[string][]string `json:"headers" structs:"headers" mapstructure:"headers" sentinel:""`
+
+	// Connection will be non-nil only for credential providers to
+	// inspect the connection information and potentially use it for
+	// authentication/protection.
+	Connection *Connection `json:"connection" structs:"connection" mapstructure:"connection"`
+
+	// ClientToken is provided to the core so that the identity
+	// can be verified and ACLs applied. This value is passed
+	// through to the logical backends but after being salted and
+	// hashed.
+	ClientToken string `json:"client_token" structs:"client_token" mapstructure:"client_token" sentinel:""`
+
+	// ClientTokenAccessor is provided to the core so that it can get
+	// logged as part of request audit logging.
+	ClientTokenAccessor string `json:"client_token_accessor" structs:"client_token_accessor" mapstructure:"client_token_accessor" sentinel:""`
+
+	// DisplayName is provided to the logical backend to help associate
+	// dynamic secrets with the source entity. This is not a sensitive
+	// name, but is useful for operators.
+	DisplayName string `json:"display_name" structs:"display_name" mapstructure:"display_name" sentinel:""`
+
+	// MountPoint is provided so that a logical backend can generate
+	// paths relative to itself.
The `Path` is effectively the client + // request path with the MountPoint trimmed off. + MountPoint string `json:"mount_point" structs:"mount_point" mapstructure:"mount_point" sentinel:""` + + // MountType is provided so that a logical backend can make decisions + // based on the specific mount type (e.g., if a mount type has different + // aliases, generating different defaults depending on the alias) + MountType string `json:"mount_type" structs:"mount_type" mapstructure:"mount_type" sentinel:""` + + // MountAccessor is provided so that identities returned by the authentication + // backends can be tied to the mount it belongs to. + MountAccessor string `json:"mount_accessor" structs:"mount_accessor" mapstructure:"mount_accessor" sentinel:""` + + // WrapInfo contains requested response wrapping parameters + WrapInfo *RequestWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info" sentinel:""` + + // ClientTokenRemainingUses represents the allowed number of uses left on the + // token supplied + ClientTokenRemainingUses int `json:"client_token_remaining_uses" structs:"client_token_remaining_uses" mapstructure:"client_token_remaining_uses"` + + // EntityID is the identity of the caller extracted out of the token used + // to make this request + EntityID string `json:"entity_id" structs:"entity_id" mapstructure:"entity_id" sentinel:""` + + // PolicyOverride indicates that the requestor wishes to override + // soft-mandatory Sentinel policies + PolicyOverride bool `json:"policy_override" structs:"policy_override" mapstructure:"policy_override"` + + // Whether the request is unauthenticated, as in, had no client token + // attached. Useful in some situations where the client token is not made + // accessible. + Unauthenticated bool `json:"unauthenticated" structs:"unauthenticated" mapstructure:"unauthenticated"` + + // MFACreds holds the parsed MFA information supplied over the API as part of + // X-Vault-MFA header + MFACreds MFACreds `json:"mfa_creds" structs:"mfa_creds" mapstructure:"mfa_creds" sentinel:""` + + // Cached token entry. This avoids another lookup in request handling when + // we've already looked it up at http handling time. Note that this token + // has not been "used", as in it will not properly take into account use + // count limitations. As a result this field should only ever be used for + // transport to a function that would otherwise do a lookup and then + // properly use the token. + tokenEntry *TokenEntry + + // For replication, contains the last WAL on the remote side after handling + // the request, used for best-effort avoidance of stale read-after-write + lastRemoteWAL uint64 + + // ControlGroup holds the authorizations that have happened on this + // request + ControlGroup *ControlGroup `json:"control_group" structs:"control_group" mapstructure:"control_group" sentinel:""` + + // ClientTokenSource tells us where the client token was sourced from, so + // we can delete it before sending off to plugins + ClientTokenSource ClientTokenSource + + // HTTPRequest, if set, can be used to access fields from the HTTP request + // that generated this logical.Request object, such as the request body. + HTTPRequest *http.Request `json:"-" sentinel:""` + + // ResponseWriter if set can be used to stream a response value to the http + // request that generated this logical.Request object. 
+	ResponseWriter *HTTPResponseWriter `json:"-" sentinel:""`
+
+	// requiredState is used internally to propagate the X-Vault-Index request
+	// header to later levels of request processing that operate only on
+	// logical.Request.
+	requiredState []string
+
+	// responseState is used internally to propagate the state that should appear
+	// in response headers; it's attached to the request rather than the response
+	// because not all requests yield non-nil responses.
+	responseState *WALState
+
+	// ClientID is the identity of the caller. If the token is associated with an
+	// entity, it will be the same as the EntityID. If the token has no entity,
+	// this will be the sha256(sorted policies + namespace) associated with the
+	// client token.
+	ClientID string `json:"client_id" structs:"client_id" mapstructure:"client_id" sentinel:""`
+
+	// InboundSSCToken is the token that arrives on an inbound request, supplied
+	// by the Vault user.
+	InboundSSCToken string
+}
+
+// Clone returns a deep copy of the request by using copystructure
+func (r *Request) Clone() (*Request, error) {
+	cpy, err := copystructure.Copy(r)
+	if err != nil {
+		return nil, err
+	}
+	return cpy.(*Request), nil
+}
+
+// Get returns a data field and guards for nil Data
+func (r *Request) Get(key string) interface{} {
+	if r.Data == nil {
+		return nil
+	}
+	return r.Data[key]
+}
+
+// GetString returns a data field as a string
+func (r *Request) GetString(key string) string {
+	raw := r.Get(key)
+	s, _ := raw.(string)
+	return s
+}
+
+func (r *Request) GoString() string {
+	return fmt.Sprintf("*%#v", *r)
+}
+
+func (r *Request) SentinelGet(key string) (interface{}, error) {
+	switch key {
+	case "path":
+		// Sanitize it here so that it's consistent in policies
+		return strings.TrimPrefix(r.Path, "/"), nil
+
+	case "wrapping", "wrap_info":
+		// If the pointer is nil, accessing the wrap info is considered
+		// "undefined", so this allows us to instead surface a TTL of zero
+		if r.WrapInfo == nil {
+			return &RequestWrapInfo{}, nil
+		}
+		return r.WrapInfo, nil
+	}
+
+	return nil, nil
+}
+
+func (r *Request) SentinelKeys() []string {
+	return []string{
+		"path",
+		"wrapping",
+		"wrap_info",
+	}
+}
+
+func (r *Request) LastRemoteWAL() uint64 {
+	return r.lastRemoteWAL
+}
+
+func (r *Request) SetLastRemoteWAL(last uint64) {
+	r.lastRemoteWAL = last
+}
+
+func (r *Request) RequiredState() []string {
+	return r.requiredState
+}
+
+func (r *Request) SetRequiredState(state []string) {
+	r.requiredState = state
+}
+
+func (r *Request) ResponseState() *WALState {
+	return r.responseState
+}
+
+func (r *Request) SetResponseState(w *WALState) {
+	r.responseState = w
+}
+
+func (r *Request) TokenEntry() *TokenEntry {
+	return r.tokenEntry
+}
+
+func (r *Request) SetTokenEntry(te *TokenEntry) {
+	r.tokenEntry = te
+}
+
+// RenewRequest creates the structure of the renew request.
+func RenewRequest(path string, secret *Secret, data map[string]interface{}) *Request {
+	return &Request{
+		Operation: RenewOperation,
+		Path:      path,
+		Data:      data,
+		Secret:    secret,
+	}
+}
+
+// RenewAuthRequest creates the structure of the renew request for an auth.
+func RenewAuthRequest(path string, auth *Auth, data map[string]interface{}) *Request {
+	return &Request{
+		Operation: RenewOperation,
+		Path:      path,
+		Data:      data,
+		Auth:      auth,
+	}
+}
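As a quick illustration (editorial, not vendored code), a caller such as a unit test can build a Request directly; InmemStorage and the operation constants appear later in this package, and the path and data values here are made up:

package example

import "github.com/hashicorp/vault/sdk/logical"

func newSignRequest() *logical.Request {
	req := &logical.Request{
		Operation: logical.UpdateOperation,
		Path:      "keys/sign",
		Data: map[string]interface{}{
			"algorithm": "ecdsa-p256",
		},
		Storage: &logical.InmemStorage{},
	}
	// Get and GetString guard against a nil Data map.
	_ = req.GetString("algorithm") // "ecdsa-p256"
	_ = req.Get("missing")         // nil
	return req
}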
+// RevokeRequest creates the structure of the revoke request.
+func RevokeRequest(path string, secret *Secret, data map[string]interface{}) *Request {
+	return &Request{
+		Operation: RevokeOperation,
+		Path:      path,
+		Data:      data,
+		Secret:    secret,
+	}
+}
+
+// RollbackRequest creates the structure of the rollback request.
+func RollbackRequest(path string) *Request {
+	return &Request{
+		Operation: RollbackOperation,
+		Path:      path,
+		Data:      make(map[string]interface{}),
+	}
+}
+
+// Operation is an enum that is used to specify the type
+// of request being made
+type Operation string
+
+const (
+	// The operations below are called per path
+	CreateOperation         Operation = "create"
+	ReadOperation                     = "read"
+	UpdateOperation                   = "update"
+	PatchOperation                    = "patch"
+	DeleteOperation                   = "delete"
+	ListOperation                     = "list"
+	HelpOperation                     = "help"
+	AliasLookaheadOperation           = "alias-lookahead"
+	ResolveRoleOperation              = "resolve-role"
+
+	// The operations below are called globally, the path is less relevant.
+	RevokeOperation   Operation = "revoke"
+	RenewOperation              = "renew"
+	RollbackOperation           = "rollback"
+)
+
+type MFACreds map[string][]string
+
+// InitializationRequest stores the parameters and context of an Initialize()
+// call being made to a logical.Backend.
+type InitializationRequest struct {
+	// Storage can be used to durably store and retrieve state.
+	Storage Storage
+}
+
+type CustomHeader struct {
+	Name  string
+	Value string
+}
+
+type CtxKeyInFlightRequestID struct{}
+
+func (c CtxKeyInFlightRequestID) String() string {
+	return "in-flight-request-ID"
+}
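A sketch of how a backend's request handler commonly dispatches on Operation (illustrative, not vendored code): myBackend is hypothetical, while ListResponse and ErrUnsupportedOperation come from this package's response and error files.

package example

import (
	"context"

	"github.com/hashicorp/vault/sdk/logical"
)

type myBackend struct{ logical.Backend }

func (b *myBackend) handle(ctx context.Context, req *logical.Request) (*logical.Response, error) {
	switch req.Operation {
	case logical.ReadOperation:
		entry, err := req.Storage.Get(ctx, req.Path)
		if err != nil {
			return nil, err
		}
		if entry == nil {
			return nil, nil // surfaced as a 404 by RespondErrorCommon
		}
		return &logical.Response{
			Data: map[string]interface{}{"value": string(entry.Value)},
		}, nil
	case logical.ListOperation:
		keys, err := req.Storage.List(ctx, req.Path)
		if err != nil {
			return nil, err
		}
		return logical.ListResponse(keys), nil
	default:
		return nil, logical.ErrUnsupportedOperation
	}
}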
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/response.go b/vendor/github.com/hashicorp/vault/sdk/logical/response.go
new file mode 100644
index 00000000000..0f8a2210eca
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/response.go
@@ -0,0 +1,322 @@
+package logical
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"strconv"
+	"sync/atomic"
+
+	"github.com/hashicorp/vault/sdk/helper/wrapping"
+)
+
+const (
+	// HTTPContentType can be specified in the Data field of a Response
+	// so that the HTTP front end can specify a custom Content-Type associated
+	// with the HTTPRawBody. This can only be used for non-secrets, and should
+	// be avoided unless absolutely necessary, such as implementing a specification.
+	// The value must be a string.
+	HTTPContentType = "http_content_type"
+
+	// HTTPRawBody is the raw content of the HTTP body that goes with the HTTPContentType.
+	// This can only be specified for non-secrets, and should be similarly
+	// avoided, like the HTTPContentType. The value must be a byte slice.
+	HTTPRawBody = "http_raw_body"
+
+	// HTTPStatusCode is the response code of the HTTP body that goes with the HTTPContentType.
+	// This can only be specified for non-secrets, and should be similarly
+	// avoided, like the HTTPContentType. The value must be an integer.
+	HTTPStatusCode = "http_status_code"
+
+	// For unwrapping we may need to know whether the value contained in the
+	// raw body is already JSON-unmarshaled. The presence of this key indicates
+	// that it has already been unmarshaled. That way we don't need to simply
+	// ignore errors.
+	HTTPRawBodyAlreadyJSONDecoded = "http_raw_body_already_json_decoded"
+
+	// If set, HTTPCacheControlHeader will replace the default Cache-Control=no-store header
+	// set by the generic wrapping handler. The value must be a string.
+	HTTPCacheControlHeader = "http_raw_cache_control"
+
+	// If set, HTTPPragmaHeader will set the Pragma response header.
+	// The value must be a string.
+	HTTPPragmaHeader = "http_raw_pragma"
+
+	// If set, HTTPWWWAuthenticateHeader will set the WWW-Authenticate response header.
+	// The value must be a string.
+	HTTPWWWAuthenticateHeader = "http_www_authenticate"
+)
+
+// Response is a struct that stores the response of a request.
+// It is used to abstract the details of the higher-level request protocol.
+type Response struct {
+	// Secret, if not nil, denotes that this response represents a secret.
+	Secret *Secret `json:"secret" structs:"secret" mapstructure:"secret"`
+
+	// Auth, if not nil, contains the authentication information for
+	// this response. This is only checked and means something for
+	// credential backends.
+	Auth *Auth `json:"auth" structs:"auth" mapstructure:"auth"`
+
+	// Response data is an opaque map that must have string keys. For
+	// secrets, this data is sent down to the user as-is. To store internal
+	// data that you don't want the user to see, store it in
+	// Secret.InternalData.
+	Data map[string]interface{} `json:"data" structs:"data" mapstructure:"data"`
+
+	// Redirect is an HTTP URL to redirect to for further authentication.
+	// This is only valid for credential backends. This will be blanked
+	// for any logical backend and ignored.
+	Redirect string `json:"redirect" structs:"redirect" mapstructure:"redirect"`
+
+	// Warnings allow operations or backends to return warnings in response
+	// to user actions without failing the action outright.
+	Warnings []string `json:"warnings" structs:"warnings" mapstructure:"warnings"`
+
+	// Information for wrapping the response in a cubbyhole
+	WrapInfo *wrapping.ResponseWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info"`
+
+	// Headers will contain the http headers from the plugin that it wishes to
+	// have as part of the output
+	Headers map[string][]string `json:"headers" structs:"headers" mapstructure:"headers"`
+}
+
+// AddWarning adds a warning into the response's warning list
+func (r *Response) AddWarning(warning string) {
+	if r.Warnings == nil {
+		r.Warnings = make([]string, 0, 1)
+	}
+	r.Warnings = append(r.Warnings, warning)
+}
+
+// IsError returns true if this response seems to indicate an error.
+func (r *Response) IsError() bool {
+	// True when the response data contains only an 'error' element, or
+	// only an 'error' and a 'data' element
+	return r != nil && r.Data != nil && r.Data["error"] != nil && (len(r.Data) == 1 || (r.Data["data"] != nil && len(r.Data) == 2))
+}
+
+func (r *Response) Error() error {
+	if !r.IsError() {
+		return nil
+	}
+	switch r.Data["error"].(type) {
+	case string:
+		return errors.New(r.Data["error"].(string))
+	case error:
+		return r.Data["error"].(error)
+	}
+	return nil
+}
+
+// HelpResponse is used to format a help response
+func HelpResponse(text string, seeAlso []string, oapiDoc interface{}) *Response {
+	return &Response{
+		Data: map[string]interface{}{
+			"help":     text,
+			"see_also": seeAlso,
+			"openapi":  oapiDoc,
+		},
+	}
+}
+
+// ErrorResponse is used to format an error response
+func ErrorResponse(text string, vargs ...interface{}) *Response {
+	if len(vargs) > 0 {
+		text = fmt.Sprintf(text, vargs...)
+	}
+	return &Response{
+		Data: map[string]interface{}{
+			"error": text,
+		},
+	}
+}
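For example (editorial, not vendored code), a backend rejecting an unknown role can pair ErrorResponse with the IsError/Error predicates above; roleName is a placeholder:

package example

import "github.com/hashicorp/vault/sdk/logical"

func unknownRole(roleName string) (*logical.Response, error) {
	resp := logical.ErrorResponse("unknown role: %q", roleName)
	// IsError is true because Data carries only the "error" element, and
	// Error() recovers that element as a plain Go error.
	if resp.IsError() {
		return resp, resp.Error()
	}
	return resp, nil
}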
+// ListResponse is used to format a response to a list operation.
+func ListResponse(keys []string) *Response {
+	resp := &Response{
+		Data: map[string]interface{}{},
+	}
+	if len(keys) != 0 {
+		resp.Data["keys"] = keys
+	}
+	return resp
+}
+
+// ListResponseWithInfo is used to format a response to a list operation and
+// return the keys as well as a map with corresponding key info.
+func ListResponseWithInfo(keys []string, keyInfo map[string]interface{}) *Response {
+	resp := ListResponse(keys)
+
+	keyInfoData := make(map[string]interface{})
+	for _, key := range keys {
+		val, ok := keyInfo[key]
+		if ok {
+			keyInfoData[key] = val
+		}
+	}
+
+	if len(keyInfoData) > 0 {
+		resp.Data["key_info"] = keyInfoData
+	}
+
+	return resp
+}
+
+// RespondWithStatusCode takes a response and converts it to a raw response with
+// the provided status code.
+func RespondWithStatusCode(resp *Response, req *Request, code int) (*Response, error) {
+	ret := &Response{
+		Data: map[string]interface{}{
+			HTTPContentType: "application/json",
+			HTTPStatusCode:  code,
+		},
+	}
+
+	if resp != nil {
+		httpResp := LogicalResponseToHTTPResponse(resp)
+
+		if req != nil {
+			httpResp.RequestID = req.ID
+		}
+
+		body, err := json.Marshal(httpResp)
+		if err != nil {
+			return nil, err
+		}
+
+		// We default to string here so that the value is HMAC'd via audit.
+		// Since this function is always marshaling to JSON, this is
+		// appropriate.
+		ret.Data[HTTPRawBody] = string(body)
+	}
+
+	return ret, nil
+}
+
+// HTTPResponseWriter is optionally added to a request object and can be used to
+// write directly to the HTTP response writer.
+type HTTPResponseWriter struct {
+	http.ResponseWriter
+	written *uint32
+}
+
+// NewHTTPResponseWriter creates a new HTTPResponseWriter object that wraps the
+// provided http.ResponseWriter.
+func NewHTTPResponseWriter(w http.ResponseWriter) *HTTPResponseWriter {
+	return &HTTPResponseWriter{
+		ResponseWriter: w,
+		written:        new(uint32),
+	}
+}
+
+// Write writes the bytes to the underlying http.ResponseWriter.
+func (w *HTTPResponseWriter) Write(bytes []byte) (int, error) {
+	atomic.StoreUint32(w.written, 1)
+	return w.ResponseWriter.Write(bytes)
+}
+
+// Written tells us if the writer has been written to yet.
+func (w *HTTPResponseWriter) Written() bool {
+	return atomic.LoadUint32(w.written) == 1
+}
+
+type WrappingResponseWriter interface {
+	http.ResponseWriter
+	Wrapped() http.ResponseWriter
+}
+
+type StatusHeaderResponseWriter struct {
+	wrapped     http.ResponseWriter
+	wroteHeader bool
+	StatusCode  int
+	headers     map[string][]*CustomHeader
+}
+
+func NewStatusHeaderResponseWriter(w http.ResponseWriter, h map[string][]*CustomHeader) *StatusHeaderResponseWriter {
+	return &StatusHeaderResponseWriter{
+		wrapped:     w,
+		wroteHeader: false,
+		StatusCode:  200,
+		headers:     h,
+	}
+}
+
+func (w *StatusHeaderResponseWriter) Wrapped() http.ResponseWriter {
+	return w.wrapped
+}
+
+func (w *StatusHeaderResponseWriter) Header() http.Header {
+	return w.wrapped.Header()
+}
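To make the headers map concrete (editorial note, not vendored code): keys may be "default", a status class such as "4xx", or an exact code, and setCustomResponseHeaders below applies them in that order, so more specific entries win. A hypothetical wrapping handler:

package example

import (
	"net/http"

	"github.com/hashicorp/vault/sdk/logical"
)

func withCustomHeaders(next http.Handler) http.Handler {
	custom := map[string][]*logical.CustomHeader{
		"default": {{Name: "X-Demo", Value: "always"}},          // every response
		"4xx":     {{Name: "Cache-Control", Value: "no-store"}}, // any 4xx status
		"404":     {{Name: "X-Not-Found", Value: "true"}},       // exactly 404
	}
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		next.ServeHTTP(logical.NewStatusHeaderResponseWriter(w, custom), r)
	})
}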
+
+func (w *StatusHeaderResponseWriter) Write(buf []byte) (int, error) {
+	// Callers are allowed to invoke ResponseWriter.Write without first calling
+	// ResponseWriter.WriteHeader ("handleUIStub" is one example). In that case
+	// the underlying Write sets the status code to 200 internally, which may
+	// invoke other implementations of WriteHeader, so we still need to set the
+	// custom headers here. When both WriteHeader and Write of
+	// StatusHeaderResponseWriter are called, the internal WriteHeader call
+	// triggered from inside Write won't change the headers.
+	if !w.wroteHeader {
+		w.setCustomResponseHeaders(w.StatusCode)
+	}
+
+	return w.wrapped.Write(buf)
+}
+
+func (w *StatusHeaderResponseWriter) WriteHeader(statusCode int) {
+	w.setCustomResponseHeaders(statusCode)
+	w.wrapped.WriteHeader(statusCode)
+	w.StatusCode = statusCode
+	// In cases where Write is called after WriteHeader, prevent setting the
+	// ResponseWriter headers twice
+	w.wroteHeader = true
+}
+
+func (w *StatusHeaderResponseWriter) setCustomResponseHeaders(status int) {
+	sch := w.headers
+	if sch == nil {
+		return
+	}
+
+	// Checking the validity of the status code
+	if status >= 600 || status < 100 {
+		return
+	}
+
+	// setter function to set the headers
+	setter := func(hvl []*CustomHeader) {
+		for _, hv := range hvl {
+			w.Header().Set(hv.Name, hv.Value)
+		}
+	}
+
+	// Setting the default headers first
+	setter(sch["default"])
+
+	// Setting the status-class ("Nxx", e.g. "4xx") headers next
+	d := fmt.Sprintf("%vxx", status/100)
+	if val, ok := sch[d]; ok {
+		setter(val)
+	}
+
+	// Setting the status-specific headers last
+	if val, ok := sch[strconv.Itoa(status)]; ok {
+		setter(val)
+	}
+
+	return
+}
+
+var _ WrappingResponseWriter = &StatusHeaderResponseWriter{}
+
+// ResolveRoleResponse returns a standard response to be returned by functions handling a ResolveRoleOperation
+func ResolveRoleResponse(roleName string) (*Response, error) {
+	return &Response{
+		Data: map[string]interface{}{
+			"role": roleName,
+		},
+	}, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/response_util.go b/vendor/github.com/hashicorp/vault/sdk/logical/response_util.go
new file mode 100644
index 00000000000..4a9f61d563f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/response_util.go
@@ -0,0 +1,204 @@
+package logical
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+
+	"github.com/hashicorp/errwrap"
+	multierror "github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+)
+
+// RespondErrorCommon pulls most of the functionality from http's
+// respondErrorCommon and some of http's handleLogical and makes it available
+// to both the http package and elsewhere.
+func RespondErrorCommon(req *Request, resp *Response, err error) (int, error) {
+	if err == nil && (resp == nil || !resp.IsError()) {
+		switch {
+		case req.Operation == ReadOperation:
+			if resp == nil {
+				return http.StatusNotFound, nil
+			}
+
+		// Basically: if we have empty "keys" or no keys at all, 404. This
+		// provides consistency with GET.
+ case req.Operation == ListOperation && (resp == nil || resp.WrapInfo == nil): + if resp == nil { + return http.StatusNotFound, nil + } + if len(resp.Data) == 0 { + if len(resp.Warnings) > 0 { + return 0, nil + } + return http.StatusNotFound, nil + } + keysRaw, ok := resp.Data["keys"] + if !ok || keysRaw == nil { + // If we don't have keys but have other data, return as-is + if len(resp.Data) > 0 || len(resp.Warnings) > 0 { + return 0, nil + } + return http.StatusNotFound, nil + } + + var keys []string + switch keysRaw.(type) { + case []interface{}: + keys = make([]string, len(keysRaw.([]interface{}))) + for i, el := range keysRaw.([]interface{}) { + s, ok := el.(string) + if !ok { + return http.StatusInternalServerError, nil + } + keys[i] = s + } + + case []string: + keys = keysRaw.([]string) + default: + return http.StatusInternalServerError, nil + } + + if len(keys) == 0 { + return http.StatusNotFound, nil + } + } + + return 0, nil + } + + if errwrap.ContainsType(err, new(ReplicationCodedError)) { + var allErrors error + var codedErr *ReplicationCodedError + errwrap.Walk(err, func(inErr error) { + newErr, ok := inErr.(*ReplicationCodedError) + if ok { + codedErr = newErr + } else { + allErrors = multierror.Append(allErrors, inErr) + } + }) + if allErrors != nil { + return codedErr.Code, multierror.Append(fmt.Errorf("errors from both primary and secondary; primary error was %v; secondary errors follow", codedErr.Msg), allErrors) + } + return codedErr.Code, errors.New(codedErr.Msg) + } + + // Start out with internal server error since in most of these cases there + // won't be a response so this won't be overridden + statusCode := http.StatusInternalServerError + // If we actually have a response, start out with bad request + if resp != nil { + statusCode = http.StatusBadRequest + } + + // Now, check the error itself; if it has a specific logical error, set the + // appropriate code + if err != nil { + switch { + case errwrap.ContainsType(err, new(StatusBadRequest)): + statusCode = http.StatusBadRequest + case errwrap.Contains(err, ErrPermissionDenied.Error()): + statusCode = http.StatusForbidden + case errwrap.Contains(err, consts.ErrInvalidWrappingToken.Error()): + statusCode = http.StatusBadRequest + case errwrap.Contains(err, ErrUnsupportedOperation.Error()): + statusCode = http.StatusMethodNotAllowed + case errwrap.Contains(err, ErrUnsupportedPath.Error()): + statusCode = http.StatusNotFound + case errwrap.Contains(err, ErrInvalidRequest.Error()): + statusCode = http.StatusBadRequest + case errwrap.Contains(err, ErrUpstreamRateLimited.Error()): + statusCode = http.StatusBadGateway + case errwrap.Contains(err, ErrRateLimitQuotaExceeded.Error()): + statusCode = http.StatusTooManyRequests + case errwrap.Contains(err, ErrLeaseCountQuotaExceeded.Error()): + statusCode = http.StatusTooManyRequests + case errwrap.Contains(err, ErrMissingRequiredState.Error()): + statusCode = http.StatusPreconditionFailed + case errwrap.Contains(err, ErrPathFunctionalityRemoved.Error()): + statusCode = http.StatusNotFound + case errwrap.Contains(err, ErrRelativePath.Error()): + statusCode = http.StatusBadRequest + case errwrap.Contains(err, ErrInvalidCredentials.Error()): + statusCode = http.StatusBadRequest + } + } + + if resp != nil && resp.IsError() { + err = fmt.Errorf("%s", resp.Data["error"].(string)) + } + + return statusCode, err +} + +// AdjustErrorStatusCode adjusts the status that will be sent in error +// conditions in a way that can be shared across http's respondError and other +// locations. 
+func AdjustErrorStatusCode(status *int, err error) {
+	// Handle nested errors
+	if t, ok := err.(*multierror.Error); ok {
+		for _, e := range t.Errors {
+			AdjustErrorStatusCode(status, e)
+		}
+	}
+
+	// Adjust status code when sealed
+	if errwrap.Contains(err, consts.ErrSealed.Error()) {
+		*status = http.StatusServiceUnavailable
+	}
+
+	if errwrap.Contains(err, consts.ErrAPILocked.Error()) {
+		*status = http.StatusServiceUnavailable
+	}
+
+	// Adjust status code on overly large request bodies
+	if errwrap.Contains(err, "http: request body too large") {
+		*status = http.StatusRequestEntityTooLarge
+	}
+
+	// Allow HTTPCoded error passthrough to specify a code
+	if t, ok := err.(HTTPCodedError); ok {
+		*status = t.Code()
+	}
+}
+
+func RespondError(w http.ResponseWriter, status int, err error) {
+	AdjustErrorStatusCode(&status, err)
+
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(status)
+
+	type ErrorResponse struct {
+		Errors []string `json:"errors"`
+	}
+	resp := &ErrorResponse{Errors: make([]string, 0, 1)}
+	if err != nil {
+		resp.Errors = append(resp.Errors, err.Error())
+	}
+
+	enc := json.NewEncoder(w)
+	enc.Encode(resp)
+}
+
+func RespondErrorAndData(w http.ResponseWriter, status int, data interface{}, err error) {
+	AdjustErrorStatusCode(&status, err)
+
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(status)
+
+	type ErrorAndDataResponse struct {
+		Errors []string    `json:"errors"`
+		Data   interface{} `json:"data"`
+	}
+	resp := &ErrorAndDataResponse{Errors: make([]string, 0, 1)}
+	if err != nil {
+		resp.Errors = append(resp.Errors, err.Error())
+	}
+	resp.Data = data
+
+	enc := json.NewEncoder(w)
+	enc.Encode(resp)
+}
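An editorial sketch of the intended call pattern (not vendored code); the handler and the initial 400 status are invented for illustration:

package example

import (
	"net/http"

	"github.com/hashicorp/vault/sdk/logical"
)

func writeFailure(w http.ResponseWriter, err error) {
	// Start from 400 and let AdjustErrorStatusCode (run inside RespondError)
	// override it, e.g. to 503 when the underlying error says Vault is sealed.
	logical.RespondError(w, http.StatusBadRequest, err)
}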
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/secret.go b/vendor/github.com/hashicorp/vault/sdk/logical/secret.go
new file mode 100644
index 00000000000..a2128d86899
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/secret.go
@@ -0,0 +1,30 @@
+package logical
+
+import "fmt"
+
+// Secret represents the secret part of a response.
+type Secret struct {
+	LeaseOptions
+
+	// InternalData is JSON-encodable data that is stored with the secret.
+	// This will be sent back during a Renew/Revoke for storing internal data
+	// used for those operations.
+	InternalData map[string]interface{} `json:"internal_data" sentinel:""`
+
+	// LeaseID is the ID returned to the user to manage this secret.
+	// This is generated by Vault core. Any set value will be ignored.
+	// For requests, this will always be blank.
+	LeaseID string `sentinel:""`
+}
+
+func (s *Secret) Validate() error {
+	if s.TTL < 0 {
+		return fmt.Errorf("ttl duration must not be less than zero")
+	}
+
+	return nil
+}
+
+func (s *Secret) GoString() string {
+	return fmt.Sprintf("*%#v", *s)
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/storage.go b/vendor/github.com/hashicorp/vault/sdk/logical/storage.go
new file mode 100644
index 00000000000..0802ad01a0f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/storage.go
@@ -0,0 +1,158 @@
+package logical
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/sdk/helper/jsonutil"
+)
+
+// ErrReadOnly is returned when a backend does not support
+// writing. This can be caused by a read-only replica or secondary
+// cluster operation.
+var ErrReadOnly = errors.New("cannot write to readonly storage")
+
+// ErrSetupReadOnly is returned when a write operation is attempted on a
+// storage while the backend is still being set up.
+var ErrSetupReadOnly = errors.New("cannot write to storage during setup")
+
+// Storage is the way that logical backends are able to read and write data.
+type Storage interface {
+	List(context.Context, string) ([]string, error)
+	Get(context.Context, string) (*StorageEntry, error)
+	Put(context.Context, *StorageEntry) error
+	Delete(context.Context, string) error
+}
+
+// StorageEntry is the entry for an item in a Storage implementation.
+type StorageEntry struct {
+	Key      string
+	Value    []byte
+	SealWrap bool
+}
+
+// DecodeJSON decodes the 'Value' present in StorageEntry.
+func (e *StorageEntry) DecodeJSON(out interface{}) error {
+	return jsonutil.DecodeJSON(e.Value, out)
+}
+
+// StorageEntryJSON creates a StorageEntry with a JSON-encoded value.
+func StorageEntryJSON(k string, v interface{}) (*StorageEntry, error) {
+	encodedBytes, err := jsonutil.EncodeJSON(v)
+	if err != nil {
+		return nil, errwrap.Wrapf("failed to encode storage entry: {{err}}", err)
+	}
+
+	return &StorageEntry{
+		Key:   k,
+		Value: encodedBytes,
+	}, nil
+}
+
+type ClearableView interface {
+	List(context.Context, string) ([]string, error)
+	Delete(context.Context, string) error
+}
+
+// ScanView is used to scan all the keys in a view iteratively
+func ScanView(ctx context.Context, view ClearableView, cb func(path string)) error {
+	frontier := []string{""}
+	for len(frontier) > 0 {
+		n := len(frontier)
+		current := frontier[n-1]
+		frontier = frontier[:n-1]
+
+		// List the contents
+		contents, err := view.List(ctx, current)
+		if err != nil {
+			return errwrap.Wrapf(fmt.Sprintf("list failed at path %q: {{err}}", current), err)
+		}
+
+		// Handle the contents in the directory
+		for _, c := range contents {
+			// Exit if the context has been canceled
+			if ctx.Err() != nil {
+				return ctx.Err()
+			}
+			fullPath := current + c
+			if strings.HasSuffix(c, "/") {
+				frontier = append(frontier, fullPath)
+			} else {
+				cb(fullPath)
+			}
+		}
+	}
+	return nil
+}
+
+// CollectKeys is used to collect all the keys in a view
+func CollectKeys(ctx context.Context, view ClearableView) ([]string, error) {
+	return CollectKeysWithPrefix(ctx, view, "")
+}
+
+// CollectKeysWithPrefix is used to collect all the keys in a view with a given prefix string
+func CollectKeysWithPrefix(ctx context.Context, view ClearableView, prefix string) ([]string, error) {
+	var keys []string
+
+	cb := func(path string) {
+		if strings.HasPrefix(path, prefix) {
+			keys = append(keys, path)
+		}
+	}
+
+	// Scan for all the keys
+	if err := ScanView(ctx, view, cb); err != nil {
+		return nil, err
+	}
+	return keys, nil
+}
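For instance (editorial, not vendored code), ScanView can be used to count leaf entries without materializing the full key list the way CollectKeys does:

package example

import (
	"context"

	"github.com/hashicorp/vault/sdk/logical"
)

func countLeafEntries(ctx context.Context, view logical.ClearableView) (int, error) {
	n := 0
	// The callback only sees leaf keys: paths ending in "/" are treated as
	// directories and pushed back onto ScanView's frontier instead.
	err := logical.ScanView(ctx, view, func(path string) { n++ })
	return n, err
}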
+
+// ClearView is used to delete all the keys in a view
+func ClearView(ctx context.Context, view ClearableView) error {
+	return ClearViewWithLogging(ctx, view, nil)
+}
+
+func ClearViewWithLogging(ctx context.Context, view ClearableView, logger hclog.Logger) error {
+	if view == nil {
+		return nil
+	}
+
+	if logger == nil {
+		logger = hclog.NewNullLogger()
+	}
+
+	// Collect all the keys
+	keys, err := CollectKeys(ctx, view)
+	if err != nil {
+		return err
+	}
+
+	logger.Debug("clearing view", "total_keys", len(keys))
+
+	// Delete all the keys
+	var pctDone int
+	for idx, key := range keys {
+		// Rather than keep trying to do stuff with a canceled context, bail;
+		// storage will fail anyway
+		if ctx.Err() != nil {
+			return ctx.Err()
+		}
+		if err := view.Delete(ctx, key); err != nil {
+			return err
+		}
+
+		newPctDone := idx * 100.0 / len(keys)
+		if int(newPctDone) > pctDone {
+			pctDone = int(newPctDone)
+			logger.Trace("view deletion progress", "percent", pctDone, "keys_deleted", idx)
+		}
+	}
+
+	logger.Debug("view cleared")
+
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/storage_inmem.go b/vendor/github.com/hashicorp/vault/sdk/logical/storage_inmem.go
new file mode 100644
index 00000000000..65368a070fe
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/storage_inmem.go
@@ -0,0 +1,87 @@
+package logical
+
+import (
+	"context"
+	"sync"
+
+	"github.com/hashicorp/vault/sdk/physical"
+	"github.com/hashicorp/vault/sdk/physical/inmem"
+)
+
+// InmemStorage implements Storage and stores all data in memory. It is
+// basically a straight copy of physical.Inmem, but it prevents backends from
+// having to load all of physical's dependencies (which are legion) just to
+// have some testing storage.
+type InmemStorage struct {
+	underlying physical.Backend
+	once       sync.Once
+}
+
+func (s *InmemStorage) Get(ctx context.Context, key string) (*StorageEntry, error) {
+	s.once.Do(s.init)
+
+	entry, err := s.underlying.Get(ctx, key)
+	if err != nil {
+		return nil, err
+	}
+	if entry == nil {
+		return nil, nil
+	}
+	return &StorageEntry{
+		Key:      entry.Key,
+		Value:    entry.Value,
+		SealWrap: entry.SealWrap,
+	}, nil
+}
+
+func (s *InmemStorage) Put(ctx context.Context, entry *StorageEntry) error {
+	s.once.Do(s.init)
+
+	return s.underlying.Put(ctx, &physical.Entry{
+		Key:      entry.Key,
+		Value:    entry.Value,
+		SealWrap: entry.SealWrap,
+	})
+}
+
+func (s *InmemStorage) Delete(ctx context.Context, key string) error {
+	s.once.Do(s.init)
+
+	return s.underlying.Delete(ctx, key)
+}
+
+func (s *InmemStorage) List(ctx context.Context, prefix string) ([]string, error) {
+	s.once.Do(s.init)
+
+	return s.underlying.List(ctx, prefix)
+}
+
+func (s *InmemStorage) Underlying() *inmem.InmemBackend {
+	s.once.Do(s.init)
+
+	return s.underlying.(*inmem.InmemBackend)
+}
+
+func (s *InmemStorage) FailPut(fail bool) *InmemStorage {
+	s.Underlying().FailPut(fail)
+	return s
+}
+
+func (s *InmemStorage) FailGet(fail bool) *InmemStorage {
+	s.Underlying().FailGet(fail)
+	return s
+}
+
+func (s *InmemStorage) FailDelete(fail bool) *InmemStorage {
+	s.Underlying().FailDelete(fail)
+	return s
+}
+
+func (s *InmemStorage) FailList(fail bool) *InmemStorage {
+	s.Underlying().FailList(fail)
+	return s
+}
+
+func (s *InmemStorage) init() {
+	s.underlying, _ = inmem.NewInmem(nil, nil)
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/storage_view.go b/vendor/github.com/hashicorp/vault/sdk/logical/storage_view.go
new file mode 100644
index 00000000000..2cd07715c2a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/storage_view.go
@@ -0,0 +1,110 @@
+package logical
+
+import (
+	"context"
+	"errors"
+	"strings"
+)
+
+type StorageView struct {
+	storage Storage
+	prefix  string
+}
+
+var ErrRelativePath = errors.New("relative paths not supported")
+
+func NewStorageView(storage Storage, prefix string) *StorageView {
+	return &StorageView{
+		storage: storage,
+		prefix:  prefix,
+	}
+}
+
+// logical.Storage impl.
+func (s *StorageView) List(ctx context.Context, prefix string) ([]string, error) {
+	if err := s.SanityCheck(prefix); err != nil {
+		return nil, err
+	}
+	return s.storage.List(ctx, s.ExpandKey(prefix))
+}
+
+// logical.Storage impl.
+func (s *StorageView) Get(ctx context.Context, key string) (*StorageEntry, error) { + if err := s.SanityCheck(key); err != nil { + return nil, err + } + entry, err := s.storage.Get(ctx, s.ExpandKey(key)) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + entry.Key = s.TruncateKey(entry.Key) + + return &StorageEntry{ + Key: entry.Key, + Value: entry.Value, + SealWrap: entry.SealWrap, + }, nil +} + +// logical.Storage impl. +func (s *StorageView) Put(ctx context.Context, entry *StorageEntry) error { + if entry == nil { + return errors.New("cannot write nil entry") + } + + if err := s.SanityCheck(entry.Key); err != nil { + return err + } + + expandedKey := s.ExpandKey(entry.Key) + + nested := &StorageEntry{ + Key: expandedKey, + Value: entry.Value, + SealWrap: entry.SealWrap, + } + + return s.storage.Put(ctx, nested) +} + +// logical.Storage impl. +func (s *StorageView) Delete(ctx context.Context, key string) error { + if err := s.SanityCheck(key); err != nil { + return err + } + + expandedKey := s.ExpandKey(key) + + return s.storage.Delete(ctx, expandedKey) +} + +func (s *StorageView) Prefix() string { + return s.prefix +} + +// SubView constructs a nested sub-view using the given prefix +func (s *StorageView) SubView(prefix string) *StorageView { + sub := s.ExpandKey(prefix) + return &StorageView{storage: s.storage, prefix: sub} +} + +// SanityCheck is used to perform a sanity check on a key +func (s *StorageView) SanityCheck(key string) error { + if strings.Contains(key, "..") { + return ErrRelativePath + } + return nil +} + +// ExpandKey is used to expand to the full key path with the prefix +func (s *StorageView) ExpandKey(suffix string) string { + return s.prefix + suffix +} + +// TruncateKey is used to remove the prefix of the key +func (s *StorageView) TruncateKey(full string) string { + return strings.TrimPrefix(full, s.prefix) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/system_view.go b/vendor/github.com/hashicorp/vault/sdk/logical/system_view.go new file mode 100644 index 00000000000..4e5627b1c88 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/system_view.go @@ -0,0 +1,235 @@ +package logical + +import ( + "context" + "errors" + "fmt" + "io" + "time" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/license" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/helper/wrapping" +) + +// SystemView exposes system configuration information in a safe way +// for logical backends to consume +type SystemView interface { + // DefaultLeaseTTL returns the default lease TTL set in Vault configuration + DefaultLeaseTTL() time.Duration + + // MaxLeaseTTL returns the max lease TTL set in Vault configuration; backend + // authors should take care not to issue credentials that last longer than + // this value, as Vault will revoke them + MaxLeaseTTL() time.Duration + + // Returns true if the mount is tainted. A mount is tainted if it is in the + // process of being unmounted. This should only be used in special + // circumstances; a primary use-case is as a guard in revocation functions. + // If revocation of a backend's leases fails it can keep the unmounting + // process from being successful. 
If the reason for this failure is not + // relevant when the mount is tainted (for instance, saving a CRL to disk + // when the stored CRL will be removed during the unmounting process + // anyways), we can ignore the errors to allow unmounting to complete. + Tainted() bool + + // Returns true if caching is disabled. If true, no caches should be used, + // despite known slowdowns. + CachingDisabled() bool + + // When run from a system view attached to a request, indicates whether the + // request is affecting a local mount or not + LocalMount() bool + + // ReplicationState indicates the state of cluster replication + ReplicationState() consts.ReplicationState + + // HasFeature returns true if the feature is currently enabled + HasFeature(feature license.Features) bool + + // ResponseWrapData wraps the given data in a cubbyhole and returns the + // token used to unwrap. + ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) + + // LookupPlugin looks into the plugin catalog for a plugin with the given + // name. Returns a PluginRunner or an error if a plugin can not be found. + LookupPlugin(ctx context.Context, pluginName string, pluginType consts.PluginType) (*pluginutil.PluginRunner, error) + + // LookupPluginVersion looks into the plugin catalog for a plugin with the given + // name and version. Returns a PluginRunner or an error if a plugin can not be found. + LookupPluginVersion(ctx context.Context, pluginName string, pluginType consts.PluginType, version string) (*pluginutil.PluginRunner, error) + + // ListVersionedPlugins returns information about all plugins of a certain + // type in the catalog, including any versioning information stored for them. + ListVersionedPlugins(ctx context.Context, pluginType consts.PluginType) ([]pluginutil.VersionedPlugin, error) + + // NewPluginClient returns a client for managing the lifecycle of plugin + // processes + NewPluginClient(ctx context.Context, config pluginutil.PluginClientConfig) (pluginutil.PluginClient, error) + + // MlockEnabled returns the configuration setting for enabling mlock on + // plugins. + MlockEnabled() bool + + // EntityInfo returns a subset of information related to the identity entity + // for the given entity id + EntityInfo(entityID string) (*Entity, error) + + // GroupsForEntity returns the group membership information for the provided + // entity id + GroupsForEntity(entityID string) ([]*Group, error) + + // PluginEnv returns Vault environment information used by plugins + PluginEnv(context.Context) (*PluginEnvironment, error) + + // GeneratePasswordFromPolicy generates a password from the policy referenced. + // If the policy does not exist, this will return an error. 
+ GeneratePasswordFromPolicy(ctx context.Context, policyName string) (password string, err error) +} + +type PasswordPolicy interface { + // Generate a random password + Generate(context.Context, io.Reader) (string, error) +} + +type ExtendedSystemView interface { + Auditor() Auditor + ForwardGenericRequest(context.Context, *Request) (*Response, error) +} + +type PasswordGenerator func() (password string, err error) + +type StaticSystemView struct { + DefaultLeaseTTLVal time.Duration + MaxLeaseTTLVal time.Duration + SudoPrivilegeVal bool + TaintedVal bool + CachingDisabledVal bool + Primary bool + EnableMlock bool + LocalMountVal bool + ReplicationStateVal consts.ReplicationState + EntityVal *Entity + GroupsVal []*Group + Features license.Features + VaultVersion string + PluginEnvironment *PluginEnvironment + PasswordPolicies map[string]PasswordGenerator +} + +type noopAuditor struct{} + +func (a noopAuditor) AuditRequest(ctx context.Context, input *LogInput) error { + return nil +} + +func (a noopAuditor) AuditResponse(ctx context.Context, input *LogInput) error { + return nil +} + +func (d StaticSystemView) Auditor() Auditor { + return noopAuditor{} +} + +func (d StaticSystemView) ForwardGenericRequest(ctx context.Context, req *Request) (*Response, error) { + return nil, errors.New("ForwardGenericRequest is not implemented in StaticSystemView") +} + +func (d StaticSystemView) DefaultLeaseTTL() time.Duration { + return d.DefaultLeaseTTLVal +} + +func (d StaticSystemView) MaxLeaseTTL() time.Duration { + return d.MaxLeaseTTLVal +} + +func (d StaticSystemView) SudoPrivilege(_ context.Context, path string, token string) bool { + return d.SudoPrivilegeVal +} + +func (d StaticSystemView) Tainted() bool { + return d.TaintedVal +} + +func (d StaticSystemView) CachingDisabled() bool { + return d.CachingDisabledVal +} + +func (d StaticSystemView) LocalMount() bool { + return d.LocalMountVal +} + +func (d StaticSystemView) ReplicationState() consts.ReplicationState { + return d.ReplicationStateVal +} + +func (d StaticSystemView) NewPluginClient(ctx context.Context, config pluginutil.PluginClientConfig) (pluginutil.PluginClient, error) { + return nil, errors.New("NewPluginClient is not implemented in StaticSystemView") +} + +func (d StaticSystemView) ResponseWrapData(_ context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) { + return nil, errors.New("ResponseWrapData is not implemented in StaticSystemView") +} + +func (d StaticSystemView) LookupPlugin(_ context.Context, _ string, _ consts.PluginType) (*pluginutil.PluginRunner, error) { + return nil, errors.New("LookupPlugin is not implemented in StaticSystemView") +} + +func (d StaticSystemView) LookupPluginVersion(_ context.Context, _ string, _ consts.PluginType, _ string) (*pluginutil.PluginRunner, error) { + return nil, errors.New("LookupPluginVersion is not implemented in StaticSystemView") +} + +func (d StaticSystemView) ListVersionedPlugins(_ context.Context, _ consts.PluginType) ([]pluginutil.VersionedPlugin, error) { + return nil, errors.New("ListVersionedPlugins is not implemented in StaticSystemView") +} + +func (d StaticSystemView) MlockEnabled() bool { + return d.EnableMlock +} + +func (d StaticSystemView) EntityInfo(entityID string) (*Entity, error) { + return d.EntityVal, nil +} + +func (d StaticSystemView) GroupsForEntity(entityID string) ([]*Group, error) { + return d.GroupsVal, nil +} + +func (d StaticSystemView) HasFeature(feature license.Features) bool { + return 
d.Features.HasFeature(feature) +} + +func (d StaticSystemView) PluginEnv(_ context.Context) (*PluginEnvironment, error) { + return d.PluginEnvironment, nil +} + +func (d StaticSystemView) GeneratePasswordFromPolicy(ctx context.Context, policyName string) (password string, err error) { + select { + case <-ctx.Done(): + return "", fmt.Errorf("context timed out") + default: + } + + if d.PasswordPolicies == nil { + return "", fmt.Errorf("password policy not found") + } + policy, exists := d.PasswordPolicies[policyName] + if !exists { + return "", fmt.Errorf("password policy not found") + } + return policy() +} + +func (d *StaticSystemView) SetPasswordPolicy(name string, generator PasswordGenerator) { + if d.PasswordPolicies == nil { + d.PasswordPolicies = map[string]PasswordGenerator{} + } + d.PasswordPolicies[name] = generator +} + +func (d *StaticSystemView) DeletePasswordPolicy(name string) (existed bool) { + _, existed = d.PasswordPolicies[name] + delete(d.PasswordPolicies, name) + return existed +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/testing.go b/vendor/github.com/hashicorp/vault/sdk/logical/testing.go new file mode 100644 index 00000000000..765f09826d4 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/testing.go @@ -0,0 +1,87 @@ +package logical + +import ( + "context" + "reflect" + "time" + + testing "github.com/mitchellh/go-testing-interface" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/logging" +) + +// TestRequest is a helper to create a purely in-memory Request struct. +func TestRequest(t testing.T, op Operation, path string) *Request { + return &Request{ + Operation: op, + Path: path, + Data: make(map[string]interface{}), + Storage: new(InmemStorage), + Connection: &Connection{}, + } +} + +// TestStorage is a helper that can be used from unit tests to verify +// the behavior of a Storage impl. +func TestStorage(t testing.T, s Storage) { + keys, err := s.List(context.Background(), "") + if err != nil { + t.Fatalf("list error: %s", err) + } + if len(keys) > 0 { + t.Fatalf("should have no keys to start: %#v", keys) + } + + entry := &StorageEntry{Key: "foo", Value: []byte("bar")} + if err := s.Put(context.Background(), entry); err != nil { + t.Fatalf("put error: %s", err) + } + + actual, err := s.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("get error: %s", err) + } + if !reflect.DeepEqual(actual, entry) { + t.Fatalf("wrong value. 
Expected: %#v\nGot: %#v", entry, actual)
+ }
+
+ keys, err = s.List(context.Background(), "")
+ if err != nil {
+ t.Fatalf("list error: %s", err)
+ }
+ if !reflect.DeepEqual(keys, []string{"foo"}) {
+ t.Fatalf("bad keys: %#v", keys)
+ }
+
+ if err := s.Delete(context.Background(), "foo"); err != nil {
+ t.Fatalf("delete error: %s", err)
+ }
+
+ keys, err = s.List(context.Background(), "")
+ if err != nil {
+ t.Fatalf("list error: %s", err)
+ }
+ if len(keys) > 0 {
+ t.Fatalf("should have no keys after delete: %#v", keys)
+ }
+}
+
+func TestSystemView() *StaticSystemView {
+ defaultLeaseTTLVal := time.Hour * 24
+ maxLeaseTTLVal := time.Hour * 24 * 2
+ return &StaticSystemView{
+ DefaultLeaseTTLVal: defaultLeaseTTLVal,
+ MaxLeaseTTLVal: maxLeaseTTLVal,
+ }
+}
+
+func TestBackendConfig() *BackendConfig {
+ bc := &BackendConfig{
+ Logger: logging.NewVaultLogger(log.Trace),
+ System: TestSystemView(),
+ Config: make(map[string]string),
+ }
+
+ return bc
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/token.go b/vendor/github.com/hashicorp/vault/sdk/logical/token.go
new file mode 100644
index 00000000000..ebebd4ad9ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/token.go
@@ -0,0 +1,304 @@
+package logical
+
+import (
+ "crypto/sha256"
+ "encoding/base64"
+ "fmt"
+ "sort"
+ "strings"
+ "time"
+
+ sockaddr "github.com/hashicorp/go-sockaddr"
+)
+
+type TokenType uint8
+
+const (
+ // TokenTypeDefault means "use the default, if any, that is currently set
+ // on the mount". If not set, results in a Service token.
+ TokenTypeDefault TokenType = iota
+
+ // TokenTypeService is a "normal" Vault token for long-lived services
+ TokenTypeService
+
+ // TokenTypeBatch is a batch token
+ TokenTypeBatch
+
+ // TokenTypeDefaultService configured on a mount, means that if
+ // TokenTypeDefault is sent back by the mount, create Service tokens
+ TokenTypeDefaultService
+
+ // TokenTypeDefaultBatch configured on a mount, means that if
+ // TokenTypeDefault is sent back by the mount, create Batch tokens
+ TokenTypeDefaultBatch
+
+ // ClientIDTWEDelimiter is the delimiter between the string fields used to
+ // generate a client ID for tokens without entities. This is the 0 character,
+ // which is a non-printable character. Please see unicode.IsPrint for details.
+ ClientIDTWEDelimiter = rune('\x00')
+
+ // SortedPoliciesTWEDelimiter is the delimiter between each policy in the
+ // sorted policies used to generate a client ID for tokens without entities.
+ // This is the 127 character, which is a non-printable character. Please see
+ // unicode.IsPrint for details.
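 //
 // For example, a token without an entity whose sorted policies are p1 and p2
 // in namespace ns1 hashes the input p1 + '\x7F' + p2 + '\x00' + ns1
 // (see CreateClientID below; the policy and namespace names are illustrative).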
+ SortedPoliciesTWEDelimiter = rune('\x7F') +) + +func (t *TokenType) UnmarshalJSON(b []byte) error { + if len(b) == 1 { + *t = TokenType(b[0] - '0') + return nil + } + + // Handle upgrade from pre-1.2 where we were serialized as string: + s := string(b) + switch s { + case `"default"`, `""`: + *t = TokenTypeDefault + case `"service"`: + *t = TokenTypeService + case `"batch"`: + *t = TokenTypeBatch + case `"default-service"`: + *t = TokenTypeDefaultService + case `"default-batch"`: + *t = TokenTypeDefaultBatch + default: + return fmt.Errorf("unknown token type %q", s) + } + return nil +} + +func (t TokenType) String() string { + switch t { + case TokenTypeDefault: + return "default" + case TokenTypeService: + return "service" + case TokenTypeBatch: + return "batch" + case TokenTypeDefaultService: + return "default-service" + case TokenTypeDefaultBatch: + return "default-batch" + default: + panic("unreachable") + } +} + +// TokenEntry is used to represent a given token +type TokenEntry struct { + Type TokenType `json:"type" mapstructure:"type" structs:"type" sentinel:""` + + // ID of this entry, generally a random UUID + ID string `json:"id" mapstructure:"id" structs:"id" sentinel:""` + + // ExternalID is the ID of a newly created service + // token that will be returned to a user + ExternalID string `json:"-"` + + // Accessor for this token, a random UUID + Accessor string `json:"accessor" mapstructure:"accessor" structs:"accessor" sentinel:""` + + // Parent token, used for revocation trees + Parent string `json:"parent" mapstructure:"parent" structs:"parent" sentinel:""` + + // Which named policies should be used + Policies []string `json:"policies" mapstructure:"policies" structs:"policies"` + + // InlinePolicy specifies ACL rules to be applied to this token entry. + InlinePolicy string `json:"inline_policy" mapstructure:"inline_policy" structs:"inline_policy"` + + // Used for audit trails, this is something like "auth/user/login" + Path string `json:"path" mapstructure:"path" structs:"path"` + + // Used for auditing. This could include things like "source", "user", "ip" + Meta map[string]string `json:"meta" mapstructure:"meta" structs:"meta" sentinel:"meta"` + + // InternalMeta is used to store internal metadata. This metadata will not be audit logged or returned from lookup APIs. + InternalMeta map[string]string `json:"internal_meta" mapstructure:"internal_meta" structs:"internal_meta"` + + // Used for operators to be able to associate with the source + DisplayName string `json:"display_name" mapstructure:"display_name" structs:"display_name"` + + // Used to restrict the number of uses (zero is unlimited). This is to + // support one-time-tokens (generalized). There are a few special values: + // if it's -1 it has run through its use counts and is executing its final + // use; if it's -2 it is tainted, which means revocation is currently + // running on it; and if it's -3 it's also tainted but revocation + // previously ran and failed, so this hints the tidy function to try it + // again. 
+ NumUses int `json:"num_uses" mapstructure:"num_uses" structs:"num_uses"`
+
+ // Time of token creation
+ CreationTime int64 `json:"creation_time" mapstructure:"creation_time" structs:"creation_time" sentinel:""`
+
+ // Duration set when token was created
+ TTL time.Duration `json:"ttl" mapstructure:"ttl" structs:"ttl" sentinel:""`
+
+ // Explicit maximum TTL on the token
+ ExplicitMaxTTL time.Duration `json:"explicit_max_ttl" mapstructure:"explicit_max_ttl" structs:"explicit_max_ttl" sentinel:""`
+
+ // If set, the role that was used for parameters at creation time
+ Role string `json:"role" mapstructure:"role" structs:"role"`
+
+ // If set, the period of the token. This is only used when created directly
+ // through the create endpoint; periods managed by roles or other auth
+ // backends are subject to those renewal rules.
+ Period time.Duration `json:"period" mapstructure:"period" structs:"period" sentinel:""`
+
+ // These are the deprecated fields
+ DisplayNameDeprecated string `json:"DisplayName" mapstructure:"DisplayName" structs:"DisplayName" sentinel:""`
+ NumUsesDeprecated int `json:"NumUses" mapstructure:"NumUses" structs:"NumUses" sentinel:""`
+ CreationTimeDeprecated int64 `json:"CreationTime" mapstructure:"CreationTime" structs:"CreationTime" sentinel:""`
+ ExplicitMaxTTLDeprecated time.Duration `json:"ExplicitMaxTTL" mapstructure:"ExplicitMaxTTL" structs:"ExplicitMaxTTL" sentinel:""`
+
+ // EntityID is the ID of the entity associated with this token.
+ EntityID string `json:"entity_id" mapstructure:"entity_id" structs:"entity_id"`
+
+ // If NoIdentityPolicies is true, the token will not inherit
+ // identity policies from the associated EntityID.
+ NoIdentityPolicies bool `json:"no_identity_policies" mapstructure:"no_identity_policies" structs:"no_identity_policies"`
+
+ // The set of CIDRs that this token can be used with
+ BoundCIDRs []*sockaddr.SockAddrMarshaler `json:"bound_cidrs" sentinel:""`
+
+ // NamespaceID is the identifier of the namespace to which this token is
+ // confined. Do not return this value over the API when the token is
+ // being looked up.
+ NamespaceID string `json:"namespace_id" mapstructure:"namespace_id" structs:"namespace_id" sentinel:""`
+
+ // CubbyholeID is the identifier of the cubbyhole storage belonging to this
+ // token
+ CubbyholeID string `json:"cubbyhole_id" mapstructure:"cubbyhole_id" structs:"cubbyhole_id" sentinel:""`
+}
+
+// CreateClientID returns the client ID, and a boolean that is false if the
+// token has an associated entity, and true otherwise
+func (te *TokenEntry) CreateClientID() (string, bool) {
+ var clientIDInputBuilder strings.Builder
+
+ // if entry has an associated entity ID, return it
+ if te.EntityID != "" {
+ return te.EntityID, false
+ }
+
+ // The entry is associated with a TWE (token without entity).
In this case
+ // we must create a client ID by calculating the following formula:
+ // clientID = SHA256(sorted policies + namespace)
+
+ // Step 1: Copy entry policies to a new slice
+ sortedPolicies := make([]string, len(te.Policies))
+ copy(sortedPolicies, te.Policies)
+
+ // Step 2: Sort and join copied policies
+ sort.Strings(sortedPolicies)
+ for _, pol := range sortedPolicies {
+ clientIDInputBuilder.WriteRune(SortedPoliciesTWEDelimiter)
+ clientIDInputBuilder.WriteString(pol)
+ }
+
+ // Step 3: Add namespace ID
+ clientIDInputBuilder.WriteRune(ClientIDTWEDelimiter)
+ clientIDInputBuilder.WriteString(te.NamespaceID)
+
+ if clientIDInputBuilder.Len() == 0 {
+ return "", true
+ }
+ // Step 4: Remove the first character in the string, as it's an unnecessary delimiter
+ clientIDInput := clientIDInputBuilder.String()[1:]
+
+ // Step 5: Hash the input
+ hashed := sha256.Sum256([]byte(clientIDInput))
+ return base64.StdEncoding.EncodeToString(hashed[:]), true
+}
+
+func (te *TokenEntry) SentinelGet(key string) (interface{}, error) {
+ if te == nil {
+ return nil, nil
+ }
+ switch key {
+ case "policies":
+ return te.Policies, nil
+
+ case "path":
+ return te.Path, nil
+
+ case "display_name":
+ return te.DisplayName, nil
+
+ case "num_uses":
+ return te.NumUses, nil
+
+ case "role":
+ return te.Role, nil
+
+ case "entity_id":
+ return te.EntityID, nil
+
+ case "period":
+ return te.Period, nil
+
+ case "period_seconds":
+ return int64(te.Period.Seconds()), nil
+
+ case "explicit_max_ttl":
+ return te.ExplicitMaxTTL, nil
+
+ case "explicit_max_ttl_seconds":
+ return int64(te.ExplicitMaxTTL.Seconds()), nil
+
+ case "creation_ttl":
+ return te.TTL, nil
+
+ case "creation_ttl_seconds":
+ return int64(te.TTL.Seconds()), nil
+
+ case "creation_time":
+ return time.Unix(te.CreationTime, 0).Format(time.RFC3339Nano), nil
+
+ case "creation_time_unix":
+ return time.Unix(te.CreationTime, 0), nil
+
+ case "meta", "metadata":
+ return te.Meta, nil
+
+ case "type":
+ teType := te.Type
+ switch teType {
+ case TokenTypeBatch, TokenTypeService:
+ case TokenTypeDefault:
+ teType = TokenTypeService
+ default:
+ return "unknown", nil
+ }
+ return teType.String(), nil
+ }
+
+ return nil, nil
+}
+
+func (te *TokenEntry) SentinelKeys() []string {
+ return []string{
+ "period",
+ "period_seconds",
+ "explicit_max_ttl",
+ "explicit_max_ttl_seconds",
+ "creation_ttl",
+ "creation_ttl_seconds",
+ "creation_time",
+ "creation_time_unix",
+ "meta",
+ "metadata",
+ "type",
+ }
+}
+
+// IsRoot returns false if the token is not root (or doesn't exist)
+func (te *TokenEntry) IsRoot() bool {
+ if te == nil {
+ return false
+ }
+
+ return len(te.Policies) == 1 && te.Policies[0] == "root"
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/translate_response.go b/vendor/github.com/hashicorp/vault/sdk/logical/translate_response.go
new file mode 100644
index 00000000000..de5ea8fdbe2
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/logical/translate_response.go
@@ -0,0 +1,161 @@
+package logical
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "time"
+)
+
+// This logic was pulled from the http package so that it can be used for
+// encoding wrapped responses as well. It simply translates the logical
+// response to an http response, with the values we want and omitting the
+// values we don't.
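+//
+// A minimal illustrative call (the field values are hypothetical):
+//
+//	httpResp := LogicalResponseToHTTPResponse(&Response{
+//		Data: map[string]interface{}{"value": "bar"},
+//	})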
+func LogicalResponseToHTTPResponse(input *Response) *HTTPResponse { + httpResp := &HTTPResponse{ + Data: input.Data, + Warnings: input.Warnings, + Headers: input.Headers, + } + + if input.Secret != nil { + httpResp.LeaseID = input.Secret.LeaseID + httpResp.Renewable = input.Secret.Renewable + httpResp.LeaseDuration = int(input.Secret.TTL.Seconds()) + } + + // If we have authentication information, then + // set up the result structure. + if input.Auth != nil { + httpResp.Auth = &HTTPAuth{ + ClientToken: input.Auth.ClientToken, + Accessor: input.Auth.Accessor, + Policies: input.Auth.Policies, + TokenPolicies: input.Auth.TokenPolicies, + IdentityPolicies: input.Auth.IdentityPolicies, + Metadata: input.Auth.Metadata, + LeaseDuration: int(input.Auth.TTL.Seconds()), + Renewable: input.Auth.Renewable, + EntityID: input.Auth.EntityID, + TokenType: input.Auth.TokenType.String(), + Orphan: input.Auth.Orphan, + MFARequirement: input.Auth.MFARequirement, + NumUses: input.Auth.NumUses, + } + } + + return httpResp +} + +func HTTPResponseToLogicalResponse(input *HTTPResponse) *Response { + logicalResp := &Response{ + Data: input.Data, + Warnings: input.Warnings, + Headers: input.Headers, + } + + if input.LeaseID != "" { + logicalResp.Secret = &Secret{ + LeaseID: input.LeaseID, + } + logicalResp.Secret.Renewable = input.Renewable + logicalResp.Secret.TTL = time.Second * time.Duration(input.LeaseDuration) + } + + if input.Auth != nil { + logicalResp.Auth = &Auth{ + ClientToken: input.Auth.ClientToken, + Accessor: input.Auth.Accessor, + Policies: input.Auth.Policies, + TokenPolicies: input.Auth.TokenPolicies, + IdentityPolicies: input.Auth.IdentityPolicies, + Metadata: input.Auth.Metadata, + EntityID: input.Auth.EntityID, + Orphan: input.Auth.Orphan, + } + logicalResp.Auth.Renewable = input.Auth.Renewable + logicalResp.Auth.TTL = time.Second * time.Duration(input.Auth.LeaseDuration) + switch input.Auth.TokenType { + case "service": + logicalResp.Auth.TokenType = TokenTypeService + case "batch": + logicalResp.Auth.TokenType = TokenTypeBatch + } + } + + return logicalResp +} + +type HTTPResponse struct { + RequestID string `json:"request_id"` + LeaseID string `json:"lease_id"` + Renewable bool `json:"renewable"` + LeaseDuration int `json:"lease_duration"` + Data map[string]interface{} `json:"data"` + WrapInfo *HTTPWrapInfo `json:"wrap_info"` + Warnings []string `json:"warnings"` + Headers map[string][]string `json:"-"` + Auth *HTTPAuth `json:"auth"` +} + +type HTTPAuth struct { + ClientToken string `json:"client_token"` + Accessor string `json:"accessor"` + Policies []string `json:"policies"` + TokenPolicies []string `json:"token_policies,omitempty"` + IdentityPolicies []string `json:"identity_policies,omitempty"` + Metadata map[string]string `json:"metadata"` + LeaseDuration int `json:"lease_duration"` + Renewable bool `json:"renewable"` + EntityID string `json:"entity_id"` + TokenType string `json:"token_type"` + Orphan bool `json:"orphan"` + MFARequirement *MFARequirement `json:"mfa_requirement"` + NumUses int `json:"num_uses"` +} + +type HTTPWrapInfo struct { + Token string `json:"token"` + Accessor string `json:"accessor"` + TTL int `json:"ttl"` + CreationTime string `json:"creation_time"` + CreationPath string `json:"creation_path"` + WrappedAccessor string `json:"wrapped_accessor,omitempty"` +} + +type HTTPSysInjector struct { + Response *HTTPResponse +} + +func (h HTTPSysInjector) MarshalJSON() ([]byte, error) { + j, err := json.Marshal(h.Response) + if err != nil { + return nil, err + } + // Fast 
path no data or empty data + if h.Response.Data == nil || len(h.Response.Data) == 0 { + return j, nil + } + // Marshaling a response will always be a JSON object, meaning it will + // always start with '{', so we hijack this to prepend necessary values + + var buf bytes.Buffer + buf.WriteRune('{') + for k, v := range h.Response.Data { + // Marshal each key/value individually + mk, err := json.Marshal(k) + if err != nil { + return nil, err + } + mv, err := json.Marshal(v) + if err != nil { + return nil, err + } + // Write into the final buffer. We'll never have a valid response + // without any fields so we can unconditionally add a comma after each. + buf.WriteString(fmt.Sprintf("%s: %s, ", mk, mv)) + } + // Add the rest, without the first '{' + buf.Write(j[1:]) + return buf.Bytes(), nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/version.pb.go b/vendor/github.com/hashicorp/vault/sdk/logical/version.pb.go new file mode 100644 index 00000000000..415970f1934 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/version.pb.go @@ -0,0 +1,204 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.7 +// source: sdk/logical/version.proto + +package logical + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Empty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_logical_version_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Empty) ProtoMessage() {} + +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_sdk_logical_version_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. +func (*Empty) Descriptor() ([]byte, []int) { + return file_sdk_logical_version_proto_rawDescGZIP(), []int{0} +} + +// VersionReply is the reply for the Version method. 
+type VersionReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PluginVersion string `protobuf:"bytes,1,opt,name=plugin_version,json=pluginVersion,proto3" json:"plugin_version,omitempty"` +} + +func (x *VersionReply) Reset() { + *x = VersionReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_logical_version_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VersionReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VersionReply) ProtoMessage() {} + +func (x *VersionReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_logical_version_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VersionReply.ProtoReflect.Descriptor instead. +func (*VersionReply) Descriptor() ([]byte, []int) { + return file_sdk_logical_version_proto_rawDescGZIP(), []int{1} +} + +func (x *VersionReply) GetPluginVersion() string { + if x != nil { + return x.PluginVersion + } + return "" +} + +var File_sdk_logical_version_proto protoreflect.FileDescriptor + +var file_sdk_logical_version_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6c, 0x6f, 0x67, + 0x69, 0x63, 0x61, 0x6c, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x35, 0x0a, + 0x0c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x25, 0x0a, + 0x0e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x32, 0x41, 0x0a, 0x0d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x1a, 0x15, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x42, 0x28, 0x5a, 0x26, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, + 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, + 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sdk_logical_version_proto_rawDescOnce sync.Once + file_sdk_logical_version_proto_rawDescData = file_sdk_logical_version_proto_rawDesc +) + +func file_sdk_logical_version_proto_rawDescGZIP() []byte { + file_sdk_logical_version_proto_rawDescOnce.Do(func() { + file_sdk_logical_version_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_logical_version_proto_rawDescData) + }) + return file_sdk_logical_version_proto_rawDescData +} + +var file_sdk_logical_version_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_sdk_logical_version_proto_goTypes = []interface{}{ + (*Empty)(nil), // 0: logical.Empty + (*VersionReply)(nil), // 1: logical.VersionReply +} +var file_sdk_logical_version_proto_depIdxs = []int32{ + 0, // 0: logical.PluginVersion.Version:input_type -> logical.Empty + 1, // 1: 
logical.PluginVersion.Version:output_type -> logical.VersionReply + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_sdk_logical_version_proto_init() } +func file_sdk_logical_version_proto_init() { + if File_sdk_logical_version_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sdk_logical_version_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_logical_version_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VersionReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sdk_logical_version_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_sdk_logical_version_proto_goTypes, + DependencyIndexes: file_sdk_logical_version_proto_depIdxs, + MessageInfos: file_sdk_logical_version_proto_msgTypes, + }.Build() + File_sdk_logical_version_proto = out.File + file_sdk_logical_version_proto_rawDesc = nil + file_sdk_logical_version_proto_goTypes = nil + file_sdk_logical_version_proto_depIdxs = nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/version.proto b/vendor/github.com/hashicorp/vault/sdk/logical/version.proto new file mode 100644 index 00000000000..345051ae9de --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/version.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; +package logical; + +option go_package = "github.com/hashicorp/vault/sdk/logical"; + +message Empty {} + +// VersionReply is the reply for the Version method. +message VersionReply { + string plugin_version = 1; +} + +// PluginVersion is an optional RPC service implemented by plugins. +service PluginVersion { + // Version returns version information for the plugin. + rpc Version(Empty) returns (VersionReply); +} \ No newline at end of file diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/version_grpc.pb.go b/vendor/github.com/hashicorp/vault/sdk/logical/version_grpc.pb.go new file mode 100644 index 00000000000..a69e9705997 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/version_grpc.pb.go @@ -0,0 +1,103 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package logical + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// PluginVersionClient is the client API for PluginVersion service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type PluginVersionClient interface { + // Version returns version information for the plugin. 
+ Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionReply, error) +} + +type pluginVersionClient struct { + cc grpc.ClientConnInterface +} + +func NewPluginVersionClient(cc grpc.ClientConnInterface) PluginVersionClient { + return &pluginVersionClient{cc} +} + +func (c *pluginVersionClient) Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionReply, error) { + out := new(VersionReply) + err := c.cc.Invoke(ctx, "/logical.PluginVersion/Version", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// PluginVersionServer is the server API for PluginVersion service. +// All implementations must embed UnimplementedPluginVersionServer +// for forward compatibility +type PluginVersionServer interface { + // Version returns version information for the plugin. + Version(context.Context, *Empty) (*VersionReply, error) + mustEmbedUnimplementedPluginVersionServer() +} + +// UnimplementedPluginVersionServer must be embedded to have forward compatible implementations. +type UnimplementedPluginVersionServer struct { +} + +func (UnimplementedPluginVersionServer) Version(context.Context, *Empty) (*VersionReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method Version not implemented") +} +func (UnimplementedPluginVersionServer) mustEmbedUnimplementedPluginVersionServer() {} + +// UnsafePluginVersionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to PluginVersionServer will +// result in compilation errors. +type UnsafePluginVersionServer interface { + mustEmbedUnimplementedPluginVersionServer() +} + +func RegisterPluginVersionServer(s grpc.ServiceRegistrar, srv PluginVersionServer) { + s.RegisterService(&PluginVersion_ServiceDesc, srv) +} + +func _PluginVersion_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PluginVersionServer).Version(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/logical.PluginVersion/Version", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PluginVersionServer).Version(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +// PluginVersion_ServiceDesc is the grpc.ServiceDesc for PluginVersion service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var PluginVersion_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "logical.PluginVersion", + HandlerType: (*PluginVersionServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Version", + Handler: _PluginVersion_Version_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "sdk/logical/version.proto", +} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/cache.go b/vendor/github.com/hashicorp/vault/sdk/physical/cache.go new file mode 100644 index 00000000000..af40f538595 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/physical/cache.go @@ -0,0 +1,260 @@ +package physical + +import ( + "context" + "sync/atomic" + + metrics "github.com/armon/go-metrics" + log "github.com/hashicorp/go-hclog" + lru "github.com/hashicorp/golang-lru" + "github.com/hashicorp/vault/sdk/helper/locksutil" + "github.com/hashicorp/vault/sdk/helper/pathmanager" +) + +const ( + // DefaultCacheSize is used if no cache size is specified for NewCache + DefaultCacheSize = 128 * 1024 + + // refreshCacheCtxKey is a ctx value that denotes the cache should be + // refreshed during a Get call. + refreshCacheCtxKey = "refresh_cache" +) + +// These paths don't need to be cached by the LRU cache. This should +// particularly help memory pressure when unsealing. +var cacheExceptionsPaths = []string{ + "wal/logs/", + "index/pages/", + "index-dr/pages/", + "sys/expire/", + "core/poison-pill", + "core/raft/tls", +} + +// CacheRefreshContext returns a context with an added value denoting if the +// cache should attempt a refresh. +func CacheRefreshContext(ctx context.Context, r bool) context.Context { + return context.WithValue(ctx, refreshCacheCtxKey, r) +} + +// cacheRefreshFromContext is a helper to look up if the provided context is +// requesting a cache refresh. +func cacheRefreshFromContext(ctx context.Context) bool { + r, ok := ctx.Value(refreshCacheCtxKey).(bool) + if !ok { + return false + } + return r +} + +// Cache is used to wrap an underlying physical backend +// and provide an LRU cache layer on top. Most of the reads done by +// Vault are for policy objects so there is a large read reduction +// by using a simple write-through cache. +type Cache struct { + backend Backend + lru *lru.TwoQueueCache + locks []*locksutil.LockEntry + logger log.Logger + enabled *uint32 + cacheExceptions *pathmanager.PathManager + metricSink metrics.MetricSink +} + +// TransactionalCache is a Cache that wraps the physical that is transactional +type TransactionalCache struct { + *Cache + Transactional +} + +// Verify Cache satisfies the correct interfaces +var ( + _ ToggleablePurgemonster = (*Cache)(nil) + _ ToggleablePurgemonster = (*TransactionalCache)(nil) + _ Backend = (*Cache)(nil) + _ Transactional = (*TransactionalCache)(nil) +) + +// NewCache returns a physical cache of the given size. +// If no size is provided, the default size is used. +func NewCache(b Backend, size int, logger log.Logger, metricSink metrics.MetricSink) *Cache { + if logger.IsDebug() { + logger.Debug("creating LRU cache", "size", size) + } + if size <= 0 { + size = DefaultCacheSize + } + + pm := pathmanager.New() + pm.AddPaths(cacheExceptionsPaths) + + cache, _ := lru.New2Q(size) + c := &Cache{ + backend: b, + lru: cache, + locks: locksutil.CreateLocks(), + logger: logger, + // This fails safe. 
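 // (An enabled value of 0 keeps the cache off until SetEnabled(true) is called.)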
+ enabled: new(uint32), + cacheExceptions: pm, + metricSink: metricSink, + } + return c +} + +func NewTransactionalCache(b Backend, size int, logger log.Logger, metricSink metrics.MetricSink) *TransactionalCache { + c := &TransactionalCache{ + Cache: NewCache(b, size, logger, metricSink), + Transactional: b.(Transactional), + } + return c +} + +func (c *Cache) ShouldCache(key string) bool { + if atomic.LoadUint32(c.enabled) == 0 { + return false + } + + return !c.cacheExceptions.HasPath(key) +} + +// SetEnabled is used to toggle whether the cache is on or off. It must be +// called with true to actually activate the cache after creation. +func (c *Cache) SetEnabled(enabled bool) { + if enabled { + atomic.StoreUint32(c.enabled, 1) + return + } + atomic.StoreUint32(c.enabled, 0) +} + +// Purge is used to clear the cache +func (c *Cache) Purge(ctx context.Context) { + // Lock the world + for _, lock := range c.locks { + lock.Lock() + defer lock.Unlock() + } + + c.lru.Purge() +} + +func (c *Cache) Put(ctx context.Context, entry *Entry) error { + if entry != nil && !c.ShouldCache(entry.Key) { + return c.backend.Put(ctx, entry) + } + + lock := locksutil.LockForKey(c.locks, entry.Key) + lock.Lock() + defer lock.Unlock() + + err := c.backend.Put(ctx, entry) + if err == nil { + c.lru.Add(entry.Key, entry) + c.metricSink.IncrCounter([]string{"cache", "write"}, 1) + } + return err +} + +func (c *Cache) Get(ctx context.Context, key string) (*Entry, error) { + if !c.ShouldCache(key) { + return c.backend.Get(ctx, key) + } + + lock := locksutil.LockForKey(c.locks, key) + lock.RLock() + defer lock.RUnlock() + + // Check the LRU first + if !cacheRefreshFromContext(ctx) { + if raw, ok := c.lru.Get(key); ok { + if raw == nil { + return nil, nil + } + c.metricSink.IncrCounter([]string{"cache", "hit"}, 1) + return raw.(*Entry), nil + } + } + + c.metricSink.IncrCounter([]string{"cache", "miss"}, 1) + // Read from the underlying backend + ent, err := c.backend.Get(ctx, key) + if err != nil { + return nil, err + } + + // Cache the result, even if nil + c.lru.Add(key, ent) + + return ent, nil +} + +func (c *Cache) Delete(ctx context.Context, key string) error { + if !c.ShouldCache(key) { + return c.backend.Delete(ctx, key) + } + + lock := locksutil.LockForKey(c.locks, key) + lock.Lock() + defer lock.Unlock() + + err := c.backend.Delete(ctx, key) + if err == nil { + c.lru.Remove(key) + } + return err +} + +func (c *Cache) List(ctx context.Context, prefix string) ([]string, error) { + // Always pass-through as this would be difficult to cache. For the same + // reason we don't lock as we can't reasonably know which locks to readlock + // ahead of time. 
+ return c.backend.List(ctx, prefix)
+}
+
+func (c *TransactionalCache) Locks() []*locksutil.LockEntry {
+ return c.locks
+}
+
+func (c *TransactionalCache) LRU() *lru.TwoQueueCache {
+ return c.lru
+}
+
+func (c *TransactionalCache) Transaction(ctx context.Context, txns []*TxnEntry) error {
+ // Bypass the locking below
+ if atomic.LoadUint32(c.enabled) == 0 {
+ return c.Transactional.Transaction(ctx, txns)
+ }
+
+ // Collect keys that need to be locked
+ var keys []string
+ for _, curr := range txns {
+ keys = append(keys, curr.Entry.Key)
+ }
+ // Lock the keys
+ for _, l := range locksutil.LocksForKeys(c.locks, keys) {
+ l.Lock()
+ defer l.Unlock()
+ }
+
+ if err := c.Transactional.Transaction(ctx, txns); err != nil {
+ return err
+ }
+
+ for _, txn := range txns {
+ if !c.ShouldCache(txn.Entry.Key) {
+ continue
+ }
+
+ switch txn.Operation {
+ case PutOperation:
+ c.lru.Add(txn.Entry.Key, txn.Entry)
+ c.metricSink.IncrCounter([]string{"cache", "write"}, 1)
+ case DeleteOperation:
+ c.lru.Remove(txn.Entry.Key)
+ c.metricSink.IncrCounter([]string{"cache", "delete"}, 1)
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/encoding.go b/vendor/github.com/hashicorp/vault/sdk/physical/encoding.go
new file mode 100644
index 00000000000..dbde84cc6dc
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/physical/encoding.go
@@ -0,0 +1,108 @@
+package physical
+
+import (
+ "context"
+ "errors"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+var (
+ ErrNonUTF8 = errors.New("key contains invalid UTF-8 characters")
+ ErrNonPrintable = errors.New("key contains non-printable characters")
+)
+
+// StorageEncoding is used to verify that keys in underlying physical requests
+// are valid UTF-8 and contain only printable characters
+type StorageEncoding struct {
+ Backend
+}
+
+// TransactionalStorageEncoding is the transactional version of the encoding
+// verifier
+type TransactionalStorageEncoding struct {
+ *StorageEncoding
+ Transactional
+}
+
+// Verify StorageEncoding satisfies the correct interfaces
+var (
+ _ Backend = (*StorageEncoding)(nil)
+ _ Transactional = (*TransactionalStorageEncoding)(nil)
+)
+
+// NewStorageEncoding returns a wrapped physical backend and verifies the key
+// encoding
+func NewStorageEncoding(b Backend) Backend {
+ enc := &StorageEncoding{
+ Backend: b,
+ }
+
+ if bTxn, ok := b.(Transactional); ok {
+ return &TransactionalStorageEncoding{
+ StorageEncoding: enc,
+ Transactional: bTxn,
+ }
+ }
+
+ return enc
+}
+
+func (e *StorageEncoding) containsNonPrintableChars(key string) bool {
+ idx := strings.IndexFunc(key, func(c rune) bool {
+ return !unicode.IsPrint(c)
+ })
+
+ return idx != -1
+}
+
+func (e *StorageEncoding) Put(ctx context.Context, entry *Entry) error {
+ if !utf8.ValidString(entry.Key) {
+ return ErrNonUTF8
+ }
+
+ if e.containsNonPrintableChars(entry.Key) {
+ return ErrNonPrintable
+ }
+
+ return e.Backend.Put(ctx, entry)
+}
+
+func (e *StorageEncoding) Delete(ctx context.Context, key string) error {
+ if !utf8.ValidString(key) {
+ return ErrNonUTF8
+ }
+
+ if e.containsNonPrintableChars(key) {
+ return ErrNonPrintable
+ }
+
+ return e.Backend.Delete(ctx, key)
+}
+
+func (e *TransactionalStorageEncoding) Transaction(ctx context.Context, txns []*TxnEntry) error {
+ for _, txn := range txns {
+ if !utf8.ValidString(txn.Entry.Key) {
+ return ErrNonUTF8
+ }
+
+ if e.containsNonPrintableChars(txn.Entry.Key) {
+ return ErrNonPrintable
+ }
+ }
+
+ return e.Transactional.Transaction(ctx, txns)
+}
+
+func (e *StorageEncoding) Purge(ctx context.Context) {
+ if
purgeable, ok := e.Backend.(ToggleablePurgemonster); ok {
+ purgeable.Purge(ctx)
+ }
+}
+
+func (e *StorageEncoding) SetEnabled(enabled bool) {
+ if purgeable, ok := e.Backend.(ToggleablePurgemonster); ok {
+ purgeable.SetEnabled(enabled)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/entry.go b/vendor/github.com/hashicorp/vault/sdk/physical/entry.go
new file mode 100644
index 00000000000..389fe6c81c1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/physical/entry.go
@@ -0,0 +1,20 @@
+package physical
+
+import (
+ "encoding/hex"
+ "fmt"
+)
+
+// Entry is used to represent data stored by the physical backend
+type Entry struct {
+ Key string
+ Value []byte
+ SealWrap bool `json:"seal_wrap,omitempty"`
+
+ // Only used in replication
+ ValueHash []byte
+}
+
+func (e *Entry) String() string {
+ return fmt.Sprintf("Key: %s. SealWrap: %t. Value: %s. ValueHash: %s", e.Key, e.SealWrap, hex.EncodeToString(e.Value), hex.EncodeToString(e.ValueHash))
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/error.go b/vendor/github.com/hashicorp/vault/sdk/physical/error.go
new file mode 100644
index 00000000000..b547e4e4288
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/physical/error.go
@@ -0,0 +1,110 @@
+package physical
+
+import (
+ "context"
+ "errors"
+ "math/rand"
+ "sync"
+ "time"
+
+ log "github.com/hashicorp/go-hclog"
+)
+
+const (
+ // DefaultErrorPercent is used to determine how often we error
+ DefaultErrorPercent = 20
+)
+
+// ErrorInjector is used to add errors into underlying physical requests
+type ErrorInjector struct {
+ backend Backend
+ errorPercent int
+ randomLock *sync.Mutex
+ random *rand.Rand
+}
+
+// TransactionalErrorInjector is the transactional version of the error
+// injector
+type TransactionalErrorInjector struct {
+ *ErrorInjector
+ Transactional
+}
+
+// Verify ErrorInjector satisfies the correct interfaces
+var (
+ _ Backend = (*ErrorInjector)(nil)
+ _ Transactional = (*TransactionalErrorInjector)(nil)
+)
+
+// NewErrorInjector returns a wrapped physical backend to inject errors
+func NewErrorInjector(b Backend, errorPercent int, logger log.Logger) *ErrorInjector {
+ if errorPercent < 0 || errorPercent > 100 {
+ errorPercent = DefaultErrorPercent
+ }
+ logger.Info("creating error injector")
+
+ return &ErrorInjector{
+ backend: b,
+ errorPercent: errorPercent,
+ randomLock: new(sync.Mutex),
+ random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
+ }
+}
+
+// NewTransactionalErrorInjector creates a new transactional ErrorInjector
+func NewTransactionalErrorInjector(b Backend, errorPercent int, logger log.Logger) *TransactionalErrorInjector {
+ return &TransactionalErrorInjector{
+ ErrorInjector: NewErrorInjector(b, errorPercent, logger),
+ Transactional: b.(Transactional),
+ }
+}
+
+func (e *ErrorInjector) SetErrorPercentage(p int) {
+ e.errorPercent = p
+}
+
+func (e *ErrorInjector) addError() error {
+ e.randomLock.Lock()
+ roll := e.random.Intn(100)
+ e.randomLock.Unlock()
+ if roll < e.errorPercent {
+ return errors.New("random error")
+ }
+
+ return nil
+}
+
+func (e *ErrorInjector) Put(ctx context.Context, entry *Entry) error {
+ if err := e.addError(); err != nil {
+ return err
+ }
+ return e.backend.Put(ctx, entry)
+}
+
+func (e *ErrorInjector) Get(ctx context.Context, key string) (*Entry, error) {
+ if err := e.addError(); err != nil {
+ return nil, err
+ }
+ return e.backend.Get(ctx, key)
+}
+
+func (e *ErrorInjector) Delete(ctx context.Context,
key string) error { + if err := e.addError(); err != nil { + return err + } + return e.backend.Delete(ctx, key) +} + +func (e *ErrorInjector) List(ctx context.Context, prefix string) ([]string, error) { + if err := e.addError(); err != nil { + return nil, err + } + return e.backend.List(ctx, prefix) +} + +func (e *TransactionalErrorInjector) Transaction(ctx context.Context, txns []*TxnEntry) error { + if err := e.addError(); err != nil { + return err + } + return e.Transactional.Transaction(ctx, txns) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem.go b/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem.go new file mode 100644 index 00000000000..be16b4caa12 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem.go @@ -0,0 +1,310 @@ +package inmem + +import ( + "context" + "errors" + "fmt" + "os" + "strconv" + "strings" + "sync" + "sync/atomic" + + "github.com/armon/go-radix" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/physical" +) + +// Verify interfaces are satisfied +var ( + _ physical.Backend = (*InmemBackend)(nil) + _ physical.HABackend = (*InmemHABackend)(nil) + _ physical.HABackend = (*TransactionalInmemHABackend)(nil) + _ physical.Lock = (*InmemLock)(nil) + _ physical.Transactional = (*TransactionalInmemBackend)(nil) + _ physical.Transactional = (*TransactionalInmemHABackend)(nil) +) + +var ( + PutDisabledError = errors.New("put operations disabled in inmem backend") + GetDisabledError = errors.New("get operations disabled in inmem backend") + DeleteDisabledError = errors.New("delete operations disabled in inmem backend") + ListDisabledError = errors.New("list operations disabled in inmem backend") + GetInTxnDisabledError = errors.New("get operations inside transactions are disabled in inmem backend") +) + +// InmemBackend is an in-memory only physical backend. It is useful +// for testing and development situations where the data is not +// expected to be durable. 
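+//
+// A minimal test-style sketch (the logger construction is illustrative):
+//
+//	b, _ := NewInmem(nil, hclog.NewNullLogger())
+//	_ = b.Put(context.Background(), &physical.Entry{Key: "foo", Value: []byte("bar")})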
+type InmemBackend struct { + sync.RWMutex + root *radix.Tree + permitPool *physical.PermitPool + logger log.Logger + failGet *uint32 + failPut *uint32 + failDelete *uint32 + failList *uint32 + failGetInTxn *uint32 + logOps bool + maxValueSize int +} + +type TransactionalInmemBackend struct { + InmemBackend +} + +// NewInmem constructs a new in-memory backend +func NewInmem(conf map[string]string, logger log.Logger) (physical.Backend, error) { + maxValueSize := 0 + maxValueSizeStr, ok := conf["max_value_size"] + if ok { + var err error + maxValueSize, err = strconv.Atoi(maxValueSizeStr) + if err != nil { + return nil, err + } + } + + return &InmemBackend{ + root: radix.New(), + permitPool: physical.NewPermitPool(physical.DefaultParallelOperations), + logger: logger, + failGet: new(uint32), + failPut: new(uint32), + failDelete: new(uint32), + failList: new(uint32), + failGetInTxn: new(uint32), + logOps: os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "", + maxValueSize: maxValueSize, + }, nil +} + +// Basically for now just creates a permit pool of size 1 so only one operation +// can run at a time +func NewTransactionalInmem(conf map[string]string, logger log.Logger) (physical.Backend, error) { + maxValueSize := 0 + maxValueSizeStr, ok := conf["max_value_size"] + if ok { + var err error + maxValueSize, err = strconv.Atoi(maxValueSizeStr) + if err != nil { + return nil, err + } + } + + return &TransactionalInmemBackend{ + InmemBackend: InmemBackend{ + root: radix.New(), + permitPool: physical.NewPermitPool(1), + logger: logger, + failGet: new(uint32), + failPut: new(uint32), + failDelete: new(uint32), + failList: new(uint32), + failGetInTxn: new(uint32), + logOps: os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "", + maxValueSize: maxValueSize, + }, + }, nil +} + +// Put is used to insert or update an entry +func (i *InmemBackend) Put(ctx context.Context, entry *physical.Entry) error { + i.permitPool.Acquire() + defer i.permitPool.Release() + + i.Lock() + defer i.Unlock() + + return i.PutInternal(ctx, entry) +} + +func (i *InmemBackend) PutInternal(ctx context.Context, entry *physical.Entry) error { + if i.logOps { + i.logger.Trace("put", "key", entry.Key) + } + if atomic.LoadUint32(i.failPut) != 0 { + return PutDisabledError + } + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + if i.maxValueSize > 0 && len(entry.Value) > i.maxValueSize { + return fmt.Errorf("%s", physical.ErrValueTooLarge) + } + + i.root.Insert(entry.Key, entry.Value) + return nil +} + +func (i *InmemBackend) FailPut(fail bool) { + var val uint32 + if fail { + val = 1 + } + atomic.StoreUint32(i.failPut, val) +} + +// Get is used to fetch an entry +func (i *InmemBackend) Get(ctx context.Context, key string) (*physical.Entry, error) { + i.permitPool.Acquire() + defer i.permitPool.Release() + + i.RLock() + defer i.RUnlock() + + return i.GetInternal(ctx, key) +} + +func (i *InmemBackend) GetInternal(ctx context.Context, key string) (*physical.Entry, error) { + if i.logOps { + i.logger.Trace("get", "key", key) + } + if atomic.LoadUint32(i.failGet) != 0 { + return nil, GetDisabledError + } + + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + if raw, ok := i.root.Get(key); ok { + return &physical.Entry{ + Key: key, + Value: raw.([]byte), + }, nil + } + return nil, nil +} + +func (i *InmemBackend) FailGet(fail bool) { + var val uint32 + if fail { + val = 1 + } + atomic.StoreUint32(i.failGet, val) +} + +func (i *InmemBackend) FailGetInTxn(fail bool) { + var val uint32 + if fail { + val = 1 + } + 
atomic.StoreUint32(i.failGetInTxn, val) +} + +// Delete is used to permanently delete an entry +func (i *InmemBackend) Delete(ctx context.Context, key string) error { + i.permitPool.Acquire() + defer i.permitPool.Release() + + i.Lock() + defer i.Unlock() + + return i.DeleteInternal(ctx, key) +} + +func (i *InmemBackend) DeleteInternal(ctx context.Context, key string) error { + if i.logOps { + i.logger.Trace("delete", "key", key) + } + if atomic.LoadUint32(i.failDelete) != 0 { + return DeleteDisabledError + } + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + i.root.Delete(key) + return nil +} + +func (i *InmemBackend) FailDelete(fail bool) { + var val uint32 + if fail { + val = 1 + } + atomic.StoreUint32(i.failDelete, val) +} + +// List is used to list all the keys under a given +// prefix, up to the next prefix. +func (i *InmemBackend) List(ctx context.Context, prefix string) ([]string, error) { + i.permitPool.Acquire() + defer i.permitPool.Release() + + i.RLock() + defer i.RUnlock() + + return i.ListInternal(ctx, prefix) +} + +func (i *InmemBackend) ListInternal(ctx context.Context, prefix string) ([]string, error) { + if i.logOps { + i.logger.Trace("list", "prefix", prefix) + } + if atomic.LoadUint32(i.failList) != 0 { + return nil, ListDisabledError + } + + var out []string + seen := make(map[string]interface{}) + walkFn := func(s string, v interface{}) bool { + trimmed := strings.TrimPrefix(s, prefix) + sep := strings.Index(trimmed, "/") + if sep == -1 { + out = append(out, trimmed) + } else { + trimmed = trimmed[:sep+1] + if _, ok := seen[trimmed]; !ok { + out = append(out, trimmed) + seen[trimmed] = struct{}{} + } + } + return false + } + i.root.WalkPrefix(prefix, walkFn) + + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + return out, nil +} + +func (i *InmemBackend) FailList(fail bool) { + var val uint32 + if fail { + val = 1 + } + atomic.StoreUint32(i.failList, val) +} + +// Transaction implements the transaction interface +func (t *TransactionalInmemBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error { + t.permitPool.Acquire() + defer t.permitPool.Release() + + t.Lock() + defer t.Unlock() + + failGetInTxn := atomic.LoadUint32(t.failGetInTxn) + for _, t := range txns { + if t.Operation == physical.GetOperation && failGetInTxn != 0 { + return GetInTxnDisabledError + } + } + + return physical.GenericTransactionHandler(ctx, t, txns) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem_ha.go b/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem_ha.go new file mode 100644 index 00000000000..64fcb3a66dc --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem_ha.go @@ -0,0 +1,167 @@ +package inmem + +import ( + "fmt" + "sync" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/physical" +) + +type InmemHABackend struct { + physical.Backend + locks map[string]string + l *sync.Mutex + cond *sync.Cond + logger log.Logger +} + +type TransactionalInmemHABackend struct { + physical.Transactional + InmemHABackend +} + +// NewInmemHA constructs a new in-memory HA backend. This is only for testing. 
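+//
+// A typical test sketch (the lock key, value, and stopCh are illustrative):
+//
+//	ha, _ := NewInmemHA(nil, logger)
+//	lock, _ := ha.(physical.HABackend).LockWith("leader", "node-1")
+//	leaderCh, _ := lock.Lock(stopCh)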
+func NewInmemHA(_ map[string]string, logger log.Logger) (physical.Backend, error) { + be, err := NewInmem(nil, logger) + if err != nil { + return nil, err + } + + in := &InmemHABackend{ + Backend: be, + locks: make(map[string]string), + logger: logger, + l: new(sync.Mutex), + } + in.cond = sync.NewCond(in.l) + return in, nil +} + +func NewTransactionalInmemHA(_ map[string]string, logger log.Logger) (physical.Backend, error) { + transInmem, err := NewTransactionalInmem(nil, logger) + if err != nil { + return nil, err + } + inmemHA := InmemHABackend{ + Backend: transInmem, + locks: make(map[string]string), + logger: logger, + l: new(sync.Mutex), + } + + in := &TransactionalInmemHABackend{ + InmemHABackend: inmemHA, + Transactional: transInmem.(physical.Transactional), + } + in.cond = sync.NewCond(in.l) + return in, nil +} + +// LockWith is used for mutual exclusion based on the given key. +func (i *InmemHABackend) LockWith(key, value string) (physical.Lock, error) { + l := &InmemLock{ + in: i, + key: key, + value: value, + } + return l, nil +} + +// LockMapSize is used in some tests to determine whether this backend has ever +// been used for HA purposes rather than simply for storage +func (i *InmemHABackend) LockMapSize() int { + return len(i.locks) +} + +// HAEnabled indicates whether the HA functionality should be exposed. +// Currently always returns true. +func (i *InmemHABackend) HAEnabled() bool { + return true +} + +// InmemLock is an in-memory Lock implementation for the HABackend +type InmemLock struct { + in *InmemHABackend + key string + value string + + held bool + leaderCh chan struct{} + l sync.Mutex +} + +func (i *InmemLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + i.l.Lock() + defer i.l.Unlock() + if i.held { + return nil, fmt.Errorf("lock already held") + } + + // Attempt an async acquisition + didLock := make(chan struct{}) + releaseCh := make(chan bool, 1) + go func() { + // Wait to acquire the lock + i.in.l.Lock() + _, ok := i.in.locks[i.key] + for ok { + i.in.cond.Wait() + _, ok = i.in.locks[i.key] + } + i.in.locks[i.key] = i.value + i.in.l.Unlock() + + // Signal that lock is held + close(didLock) + + // Handle an early abort + release := <-releaseCh + if release { + i.in.l.Lock() + delete(i.in.locks, i.key) + i.in.l.Unlock() + i.in.cond.Broadcast() + } + }() + + // Wait for lock acquisition or shutdown + select { + case <-didLock: + releaseCh <- false + case <-stopCh: + releaseCh <- true + return nil, nil + } + + // Create the leader channel + i.held = true + i.leaderCh = make(chan struct{}) + return i.leaderCh, nil +} + +func (i *InmemLock) Unlock() error { + i.l.Lock() + defer i.l.Unlock() + + if !i.held { + return nil + } + + close(i.leaderCh) + i.leaderCh = nil + i.held = false + + i.in.l.Lock() + delete(i.in.locks, i.key) + i.in.l.Unlock() + i.in.cond.Broadcast() + return nil +} + +func (i *InmemLock) Value() (bool, string, error) { + i.in.l.Lock() + val, ok := i.in.locks[i.key] + i.in.l.Unlock() + return ok, val, nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/latency.go b/vendor/github.com/hashicorp/vault/sdk/physical/latency.go new file mode 100644 index 00000000000..18b2c4c1451 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/physical/latency.go @@ -0,0 +1,113 @@ +package physical + +import ( + "context" + "math/rand" + "sync" + "time" + + log "github.com/hashicorp/go-hclog" + uberAtomic "go.uber.org/atomic" +) + +const ( + // DefaultJitterPercent is used if no cache size is 
+ // specified for NewLatencyInjector.
+ DefaultJitterPercent = 20
+)
+
+// LatencyInjector is used to add latency into underlying physical requests
+type LatencyInjector struct {
+ logger log.Logger
+ backend Backend
+ latency *uberAtomic.Duration
+ jitterPercent int
+ randomLock *sync.Mutex
+ random *rand.Rand
+}
+
+// TransactionalLatencyInjector is the transactional version of the latency
+// injector
+type TransactionalLatencyInjector struct {
+ *LatencyInjector
+ Transactional
+}
+
+// Verify LatencyInjector satisfies the correct interfaces
+var (
+ _ Backend = (*LatencyInjector)(nil)
+ _ Transactional = (*TransactionalLatencyInjector)(nil)
+)
+
+// NewLatencyInjector returns a wrapped physical backend to simulate latency
+func NewLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *LatencyInjector {
+ if jitter < 0 || jitter > 100 {
+ jitter = DefaultJitterPercent
+ }
+ logger.Info("creating latency injector")
+
+ return &LatencyInjector{
+ logger: logger,
+ backend: b,
+ latency: uberAtomic.NewDuration(latency),
+ jitterPercent: jitter,
+ randomLock: new(sync.Mutex),
+ random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
+ }
+}
+
+// NewTransactionalLatencyInjector creates a new transactional LatencyInjector
+func NewTransactionalLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *TransactionalLatencyInjector {
+ return &TransactionalLatencyInjector{
+ LatencyInjector: NewLatencyInjector(b, latency, jitter, logger),
+ Transactional: b.(Transactional),
+ }
+}
+
+func (l *LatencyInjector) SetLatency(latency time.Duration) {
+ l.logger.Info("Changing backend latency", "latency", latency)
+ l.latency.Store(latency)
+}
+
+func (l *LatencyInjector) addLatency() {
+ // Calculate a multiplier between (100 - jitter)% and (100 + jitter)%
+ percent := 100
+ if l.jitterPercent > 0 {
+ min := 100 - l.jitterPercent
+ max := 100 + l.jitterPercent
+ l.randomLock.Lock()
+ percent = l.random.Intn(max-min) + min
+ l.randomLock.Unlock()
+ }
+ latencyDuration := time.Duration(int(l.latency.Load()) * percent / 100)
+ time.Sleep(latencyDuration)
+}
+
+// Put is a latent put request
+func (l *LatencyInjector) Put(ctx context.Context, entry *Entry) error {
+ l.addLatency()
+ return l.backend.Put(ctx, entry)
+}
+
+// Get is a latent get request
+func (l *LatencyInjector) Get(ctx context.Context, key string) (*Entry, error) {
+ l.addLatency()
+ return l.backend.Get(ctx, key)
+}
+
+// Delete is a latent delete request
+func (l *LatencyInjector) Delete(ctx context.Context, key string) error {
+ l.addLatency()
+ return l.backend.Delete(ctx, key)
+}
+
+// List is a latent list request
+func (l *LatencyInjector) List(ctx context.Context, prefix string) ([]string, error) {
+ l.addLatency()
+ return l.backend.List(ctx, prefix)
+}
+
+// Transaction is a latent transaction request
+func (l *TransactionalLatencyInjector) Transaction(ctx context.Context, txns []*TxnEntry) error {
+ l.addLatency()
+ return l.Transactional.Transaction(ctx, txns)
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/physical.go b/vendor/github.com/hashicorp/vault/sdk/physical/physical.go
new file mode 100644
index 00000000000..808abd50fcd
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/physical/physical.go
@@ -0,0 +1,134 @@
+package physical
+
+import (
+ "context"
+ "strings"
+
+ log "github.com/hashicorp/go-hclog"
+)
+
+const DefaultParallelOperations = 128
+
+// Operation is the type of a physical operation
+type Operation string
+
+const (
+ DeleteOperation Operation = "delete"
+ GetOperation = "get"
+ ListOperation = "list"
+ PutOperation = "put"
+)
+
+const (
+ ErrValueTooLarge = "put failed due to value being too large"
+ ErrKeyTooLarge = "put failed due to key being too large"
+)
+
+// Backend is the interface required for a physical
+// backend. A physical backend is used to durably store
+// data outside of Vault. As such, it is completely untrusted,
+// and is only accessed via a security barrier. The backends
+// must represent keys in a hierarchical manner. All methods
+// are expected to be thread safe.
+type Backend interface {
+ // Put is used to insert or update an entry
+ Put(ctx context.Context, entry *Entry) error
+
+ // Get is used to fetch an entry
+ Get(ctx context.Context, key string) (*Entry, error)
+
+ // Delete is used to permanently delete an entry
+ Delete(ctx context.Context, key string) error
+
+ // List is used to list all the keys under a given
+ // prefix, up to the next prefix.
+ List(ctx context.Context, prefix string) ([]string, error)
+}
+
+// HABackend is an extension to the standard physical
+// backend to support high-availability. Vault only expects to
+// use mutual exclusion to allow multiple instances to act as a
+// hot standby for a leader that services all requests.
+type HABackend interface {
+ // LockWith is used for mutual exclusion based on the given key.
+ LockWith(key, value string) (Lock, error)
+
+ // HAEnabled indicates whether HA functionality is enabled
+ HAEnabled() bool
+}
+
+// ToggleablePurgemonster is an interface for backends that can toggle on or
+// off special functionality and/or support purging. This is only used for the
+// cache, don't use it for other things.
+type ToggleablePurgemonster interface {
+ Purge(ctx context.Context)
+ SetEnabled(bool)
+}
+
+// RedirectDetect is an optional interface that an HABackend
+// can implement. If implemented, a redirect address can be automatically
+// detected.
+type RedirectDetect interface {
+ // DetectHostAddr is used to detect the host address
+ DetectHostAddr() (string, error)
+}
+
+type Lock interface {
+ // Lock is used to acquire the given lock.
+ // The stopCh is optional and if closed should interrupt the lock
+ // acquisition attempt. The returned channel should be closed when
+ // leadership is lost.
+ Lock(stopCh <-chan struct{}) (<-chan struct{}, error)
+
+ // Unlock is used to release the lock
+ Unlock() error
+
+ // Value returns the value of the lock and whether it is held
+ Value() (bool, string, error)
+}
+
+// Factory is the factory function to create a physical backend.
+type Factory func(config map[string]string, logger log.Logger) (Backend, error)
+
+// PermitPool is used to limit maximum outstanding requests
+type PermitPool struct {
+ sem chan int
+}
+
+// NewPermitPool returns a new permit pool with the provided
+// number of permits
+func NewPermitPool(permits int) *PermitPool {
+ if permits < 1 {
+ permits = DefaultParallelOperations
+ }
+ return &PermitPool{
+ sem: make(chan int, permits),
+ }
+}
+
+// Acquire returns when a permit has been acquired
+func (c *PermitPool) Acquire() {
+ c.sem <- 1
+}
+
+// Release returns a permit to the pool
+func (c *PermitPool) Release() {
+ <-c.sem
+}
+
+// CurrentPermits returns the number of permits currently held
+func (c *PermitPool) CurrentPermits() int {
+ return len(c.sem)
+}
+
+// Prefixes is a shared helper function that returns all parent 'folders' for a
+// given vault key.
+// e.g.
for 'foo/bar/baz', it returns ['foo', 'foo/bar'] +func Prefixes(s string) []string { + components := strings.Split(s, "/") + result := []string{} + for i := 1; i < len(components); i++ { + result = append(result, strings.Join(components[:i], "/")) + } + return result +} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/physical_access.go b/vendor/github.com/hashicorp/vault/sdk/physical/physical_access.go new file mode 100644 index 00000000000..7497313afca --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/physical/physical_access.go @@ -0,0 +1,40 @@ +package physical + +import ( + "context" +) + +// PhysicalAccess is a wrapper around physical.Backend that allows Core to +// expose its physical storage operations through PhysicalAccess() while +// restricting the ability to modify Core.physical itself. +type PhysicalAccess struct { + physical Backend +} + +var _ Backend = (*PhysicalAccess)(nil) + +func NewPhysicalAccess(physical Backend) *PhysicalAccess { + return &PhysicalAccess{physical: physical} +} + +func (p *PhysicalAccess) Put(ctx context.Context, entry *Entry) error { + return p.physical.Put(ctx, entry) +} + +func (p *PhysicalAccess) Get(ctx context.Context, key string) (*Entry, error) { + return p.physical.Get(ctx, key) +} + +func (p *PhysicalAccess) Delete(ctx context.Context, key string) error { + return p.physical.Delete(ctx, key) +} + +func (p *PhysicalAccess) List(ctx context.Context, prefix string) ([]string, error) { + return p.physical.List(ctx, prefix) +} + +func (p *PhysicalAccess) Purge(ctx context.Context) { + if purgeable, ok := p.physical.(ToggleablePurgemonster); ok { + purgeable.Purge(ctx) + } +} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/physical_view.go b/vendor/github.com/hashicorp/vault/sdk/physical/physical_view.go new file mode 100644 index 00000000000..189ac93172a --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/physical/physical_view.go @@ -0,0 +1,94 @@ +package physical + +import ( + "context" + "errors" + "strings" +) + +var ErrRelativePath = errors.New("relative paths not supported") + +// View represents a prefixed view of a physical backend +type View struct { + backend Backend + prefix string +} + +// Verify View satisfies the correct interfaces +var _ Backend = (*View)(nil) + +// NewView takes an underlying physical backend and returns +// a view of it that can only operate with the given prefix. 
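+// The prefix is prepended to keys verbatim, so it should normally end in "/".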
+func NewView(backend Backend, prefix string) *View { + return &View{ + backend: backend, + prefix: prefix, + } +} + +// List the contents of the prefixed view +func (v *View) List(ctx context.Context, prefix string) ([]string, error) { + if err := v.sanityCheck(prefix); err != nil { + return nil, err + } + return v.backend.List(ctx, v.expandKey(prefix)) +} + +// Get the key of the prefixed view +func (v *View) Get(ctx context.Context, key string) (*Entry, error) { + if err := v.sanityCheck(key); err != nil { + return nil, err + } + entry, err := v.backend.Get(ctx, v.expandKey(key)) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + entry.Key = v.truncateKey(entry.Key) + + return &Entry{ + Key: entry.Key, + Value: entry.Value, + }, nil +} + +// Put the entry into the prefix view +func (v *View) Put(ctx context.Context, entry *Entry) error { + if err := v.sanityCheck(entry.Key); err != nil { + return err + } + + nested := &Entry{ + Key: v.expandKey(entry.Key), + Value: entry.Value, + } + return v.backend.Put(ctx, nested) +} + +// Delete the entry from the prefix view +func (v *View) Delete(ctx context.Context, key string) error { + if err := v.sanityCheck(key); err != nil { + return err + } + return v.backend.Delete(ctx, v.expandKey(key)) +} + +// sanityCheck is used to perform a sanity check on a key +func (v *View) sanityCheck(key string) error { + if strings.Contains(key, "..") { + return ErrRelativePath + } + return nil +} + +// expandKey is used to expand to the full key path with the prefix +func (v *View) expandKey(suffix string) string { + return v.prefix + suffix +} + +// truncateKey is used to remove the prefix of the key +func (v *View) truncateKey(full string) string { + return strings.TrimPrefix(full, v.prefix) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/testing.go b/vendor/github.com/hashicorp/vault/sdk/physical/testing.go new file mode 100644 index 00000000000..6e0ddfcc0ea --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/physical/testing.go @@ -0,0 +1,497 @@ +package physical + +import ( + "context" + "reflect" + "sort" + "testing" + "time" +) + +func ExerciseBackend(t testing.TB, b Backend) { + t.Helper() + + // Should be empty + keys, err := b.List(context.Background(), "") + if err != nil { + t.Fatalf("initial list failed: %v", err) + } + if len(keys) != 0 { + t.Errorf("initial not empty: %v", keys) + } + + // Delete should work if it does not exist + err = b.Delete(context.Background(), "foo") + if err != nil { + t.Fatalf("idempotent delete: %v", err) + } + + // Get should not fail, but be nil + out, err := b.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("initial get failed: %v", err) + } + if out != nil { + t.Errorf("initial get was not nil: %v", out) + } + + // Make an entry + e := &Entry{Key: "foo", Value: []byte("test")} + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("put failed: %v", err) + } + + // Get should work + out, err = b.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("get failed: %v", err) + } + if !reflect.DeepEqual(out, e) { + t.Errorf("bad: %v expected: %v", out, e) + } + + // List should not be empty + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("list failed: %v", err) + } + if len(keys) != 1 || keys[0] != "foo" { + t.Errorf("keys[0] did not equal foo: %v", keys) + } + + // Delete should work + err = b.Delete(context.Background(), "foo") + if err != nil { + t.Fatalf("delete: %v", err) + } + 
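+ // The checks below verify the backend is empty again, then exercise
+ // nested keys to confirm hierarchical List and Delete semantics.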
+ // Should be empty + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("list after delete: %v", err) + } + if len(keys) != 0 { + t.Errorf("list after delete not empty: %v", keys) + } + + // Get should fail + out, err = b.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("get after delete: %v", err) + } + if out != nil { + t.Errorf("get after delete not nil: %v", out) + } + + // Multiple Puts should work; GH-189 + e = &Entry{Key: "foo", Value: []byte("test")} + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("multi put 1 failed: %v", err) + } + e = &Entry{Key: "foo", Value: []byte("test")} + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("multi put 2 failed: %v", err) + } + + // Make a nested entry + e = &Entry{Key: "foo/bar", Value: []byte("baz")} + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("nested put failed: %v", err) + } + + // Get should work + out, err = b.Get(context.Background(), "foo/bar") + if err != nil { + t.Fatalf("get failed: %v", err) + } + if !reflect.DeepEqual(out, e) { + t.Errorf("bad: %v expected: %v", out, e) + } + + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("list multi failed: %v", err) + } + sort.Strings(keys) + if len(keys) != 2 || keys[0] != "foo" || keys[1] != "foo/" { + t.Errorf("expected 2 keys [foo, foo/]: %v", keys) + } + + // Delete with children should work + err = b.Delete(context.Background(), "foo") + if err != nil { + t.Fatalf("delete after multi: %v", err) + } + + // Get should return the child + out, err = b.Get(context.Background(), "foo/bar") + if err != nil { + t.Fatalf("get after multi delete: %v", err) + } + if out == nil { + t.Errorf("get after multi delete not nil: %v", out) + } + + // Removal of nested secret should not leave artifacts + e = &Entry{Key: "foo/nested1/nested2/nested3", Value: []byte("baz")} + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("deep nest: %v", err) + } + + err = b.Delete(context.Background(), "foo/nested1/nested2/nested3") + if err != nil { + t.Fatalf("failed to remove deep nest: %v", err) + } + + keys, err = b.List(context.Background(), "foo/") + if err != nil { + t.Fatalf("err: %v", err) + } + if len(keys) != 1 || keys[0] != "bar" { + t.Errorf("should be exactly 1 key == bar: %v", keys) + } + + // Make a second nested entry to test prefix removal + e = &Entry{Key: "foo/zip", Value: []byte("zap")} + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("failed to create second nested: %v", err) + } + + // Delete should not remove the prefix + err = b.Delete(context.Background(), "foo/bar") + if err != nil { + t.Fatalf("failed to delete nested prefix: %v", err) + } + + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("list nested prefix: %v", err) + } + if len(keys) != 1 || keys[0] != "foo/" { + t.Errorf("should be exactly 1 key == foo/: %v", keys) + } + + // Delete should remove the prefix + err = b.Delete(context.Background(), "foo/zip") + if err != nil { + t.Fatalf("failed to delete second prefix: %v", err) + } + + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("listing after second delete failed: %v", err) + } + if len(keys) != 0 { + t.Errorf("should be empty at end: %v", keys) + } + + // When the root path is empty, adding and removing deep nested values should not break listing + e = &Entry{Key: "foo/nested1/nested2/value1", Value: []byte("baz")} + err = b.Put(context.Background(), e) + if err != nil { + 
t.Fatalf("deep nest: %v", err) + } + + e = &Entry{Key: "foo/nested1/nested2/value2", Value: []byte("baz")} + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("deep nest: %v", err) + } + + err = b.Delete(context.Background(), "foo/nested1/nested2/value2") + if err != nil { + t.Fatalf("failed to remove deep nest: %v", err) + } + + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("listing of root failed after deletion: %v", err) + } + if len(keys) == 0 { + t.Errorf("root is returning empty after deleting a single nested value, expected nested1/: %v", keys) + keys, err = b.List(context.Background(), "foo/nested1") + if err != nil { + t.Fatalf("listing of expected nested path 'foo/nested1' failed: %v", err) + } + // prove that the root should not be empty and that foo/nested1 exists + if len(keys) != 0 { + t.Logf(" keys can still be listed from nested1/ so it's not empty, expected nested2/: %v", keys) + } + } + + // cleanup left over listing bug test value + err = b.Delete(context.Background(), "foo/nested1/nested2/value1") + if err != nil { + t.Fatalf("failed to remove deep nest: %v", err) + } + + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("listing of root failed after delete of deep nest: %v", err) + } + if len(keys) != 0 { + t.Errorf("should be empty at end: %v", keys) + } +} + +func ExerciseBackend_ListPrefix(t testing.TB, b Backend) { + t.Helper() + + e1 := &Entry{Key: "foo", Value: []byte("test")} + e2 := &Entry{Key: "foo/bar", Value: []byte("test")} + e3 := &Entry{Key: "foo/bar/baz", Value: []byte("test")} + + defer func() { + b.Delete(context.Background(), "foo") + b.Delete(context.Background(), "foo/bar") + b.Delete(context.Background(), "foo/bar/baz") + }() + + err := b.Put(context.Background(), e1) + if err != nil { + t.Fatalf("failed to put entry 1: %v", err) + } + err = b.Put(context.Background(), e2) + if err != nil { + t.Fatalf("failed to put entry 2: %v", err) + } + err = b.Put(context.Background(), e3) + if err != nil { + t.Fatalf("failed to put entry 3: %v", err) + } + + // Scan the root + keys, err := b.List(context.Background(), "") + if err != nil { + t.Fatalf("list root: %v", err) + } + sort.Strings(keys) + if len(keys) != 2 || keys[0] != "foo" || keys[1] != "foo/" { + t.Errorf("root expected [foo foo/]: %v", keys) + } + + // Scan foo/ + keys, err = b.List(context.Background(), "foo/") + if err != nil { + t.Fatalf("list level 1: %v", err) + } + sort.Strings(keys) + if len(keys) != 2 || keys[0] != "bar" || keys[1] != "bar/" { + t.Errorf("level 1 expected [bar bar/]: %v", keys) + } + + // Scan foo/bar/ + keys, err = b.List(context.Background(), "foo/bar/") + if err != nil { + t.Fatalf("list level 2: %v", err) + } + sort.Strings(keys) + if len(keys) != 1 || keys[0] != "baz" { + t.Errorf("level 1 expected [baz]: %v", keys) + } +} + +func ExerciseHABackend(t testing.TB, b HABackend, b2 HABackend) { + t.Helper() + + // Get the lock + lock, err := b.LockWith("foo", "bar") + if err != nil { + t.Fatalf("initial lock: %v", err) + } + + // Attempt to lock + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("lock attempt 1: %v", err) + } + if leaderCh == nil { + t.Fatalf("missing leaderCh") + } + + // Check the value + held, val, err := lock.Value() + if err != nil { + t.Fatalf("err: %v", err) + } + if !held { + t.Errorf("should be held") + } + if val != "bar" { + t.Errorf("expected value bar: %v", err) + } + + // Second acquisition should fail + lock2, err := b2.LockWith("foo", "baz") + if err != nil 
{ + t.Fatalf("lock 2: %v", err) + } + + // Cancel attempt in 50 msec + stopCh := make(chan struct{}) + time.AfterFunc(50*time.Millisecond, func() { + close(stopCh) + }) + + // Attempt to lock + leaderCh2, err := lock2.Lock(stopCh) + if err != nil { + t.Fatalf("stop lock 2: %v", err) + } + if leaderCh2 != nil { + t.Errorf("should not have gotten leaderCh: %v", leaderCh2) + } + + // Release the first lock + lock.Unlock() + + // Attempt to lock should work + leaderCh2, err = lock2.Lock(nil) + if err != nil { + t.Fatalf("lock 2 lock: %v", err) + } + if leaderCh2 == nil { + t.Errorf("should get leaderCh") + } + + // Check the value + held, val, err = lock2.Value() + if err != nil { + t.Fatalf("value: %v", err) + } + if !held { + t.Errorf("should still be held") + } + if val != "baz" { + t.Errorf("expected: baz, got: %v", val) + } + + // Cleanup + lock2.Unlock() +} + +func ExerciseTransactionalBackend(t testing.TB, b Backend) { + t.Helper() + tb, ok := b.(Transactional) + if !ok { + t.Fatal("Not a transactional backend") + } + + txns := SetupTestingTransactions(t, b) + + if err := tb.Transaction(context.Background(), txns); err != nil { + t.Fatal(err) + } + + keys, err := b.List(context.Background(), "") + if err != nil { + t.Fatal(err) + } + + expected := []string{"foo", "zip"} + + sort.Strings(keys) + sort.Strings(expected) + if !reflect.DeepEqual(keys, expected) { + t.Fatalf("mismatch: expected\n%#v\ngot\n%#v\n", expected, keys) + } + + entry, err := b.Get(context.Background(), "foo") + if err != nil { + t.Fatal(err) + } + if entry == nil { + t.Fatal("got nil entry") + } + if entry.Value == nil { + t.Fatal("got nil value") + } + if string(entry.Value) != "bar3" { + t.Fatal("updates did not apply correctly") + } + + entry, err = b.Get(context.Background(), "zip") + if err != nil { + t.Fatal(err) + } + if entry == nil { + t.Fatal("got nil entry") + } + if entry.Value == nil { + t.Fatal("got nil value") + } + if string(entry.Value) != "zap3" { + t.Fatal("updates did not apply correctly") + } +} + +func SetupTestingTransactions(t testing.TB, b Backend) []*TxnEntry { + t.Helper() + // Add a few keys so that we test rollback with deletion + if err := b.Put(context.Background(), &Entry{ + Key: "foo", + Value: []byte("bar"), + }); err != nil { + t.Fatal(err) + } + if err := b.Put(context.Background(), &Entry{ + Key: "zip", + Value: []byte("zap"), + }); err != nil { + t.Fatal(err) + } + if err := b.Put(context.Background(), &Entry{ + Key: "deleteme", + }); err != nil { + t.Fatal(err) + } + if err := b.Put(context.Background(), &Entry{ + Key: "deleteme2", + }); err != nil { + t.Fatal(err) + } + + txns := []*TxnEntry{ + { + Operation: PutOperation, + Entry: &Entry{ + Key: "foo", + Value: []byte("bar2"), + }, + }, + { + Operation: DeleteOperation, + Entry: &Entry{ + Key: "deleteme", + }, + }, + { + Operation: PutOperation, + Entry: &Entry{ + Key: "foo", + Value: []byte("bar3"), + }, + }, + { + Operation: DeleteOperation, + Entry: &Entry{ + Key: "deleteme2", + }, + }, + { + Operation: PutOperation, + Entry: &Entry{ + Key: "zip", + Value: []byte("zap3"), + }, + }, + } + + return txns +} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/transactions.go b/vendor/github.com/hashicorp/vault/sdk/physical/transactions.go new file mode 100644 index 00000000000..a943c6bd95e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/physical/transactions.go @@ -0,0 +1,150 @@ +package physical + +import ( + "context" + "fmt" + + "github.com/hashicorp/go-multierror" +) + +// 
TxnEntry is an operation that takes place atomically as part of
+// a transactional update. Only supported by Transactional backends.
+type TxnEntry struct {
+ Operation Operation
+ Entry *Entry
+}
+
+func (t *TxnEntry) String() string {
+ return fmt.Sprintf("Operation: %s. Entry: %s", t.Operation, t.Entry)
+}
+
+// Transactional is an optional interface for backends that
+// support doing transactional updates of multiple keys. This is
+// required for some features such as replication.
+type Transactional interface {
+ // The function to run a transaction
+ Transaction(context.Context, []*TxnEntry) error
+}
+
+type TransactionalBackend interface {
+ Backend
+ Transactional
+}
+
+type PseudoTransactional interface {
+ // An internal function should do no locking or permit pool acquisition.
+ // Depending on the backend and if it natively supports transactions, these
+ // may simply chain to the normal backend functions.
+ GetInternal(context.Context, string) (*Entry, error)
+ PutInternal(context.Context, *Entry) error
+ DeleteInternal(context.Context, string) error
+}
+
+// GenericTransactionHandler implements the transaction interface in terms of
+// a PseudoTransactional backend.
+func GenericTransactionHandler(ctx context.Context, t PseudoTransactional, txns []*TxnEntry) (retErr error) {
+ rollbackStack := make([]*TxnEntry, 0, len(txns))
+ var dirty bool
+
+ // Update all of our GET transaction entries, so we can populate existing values back at the WAL layer.
+ for _, txn := range txns {
+ if txn.Operation == GetOperation {
+ entry, err := t.GetInternal(ctx, txn.Entry.Key)
+ if err != nil {
+ return err
+ }
+ if entry != nil {
+ txn.Entry.Value = entry.Value
+ }
+ }
+ }
+
+ // We walk the transactions in order; each successful operation goes into a
+ // LIFO for rollback if we hit an error along the way
+TxnWalk:
+ for _, txn := range txns {
+ switch txn.Operation {
+ case DeleteOperation:
+ entry, err := t.GetInternal(ctx, txn.Entry.Key)
+ if err != nil {
+ retErr = multierror.Append(retErr, err)
+ dirty = true
+ break TxnWalk
+ }
+ if entry == nil {
+ // Nothing to delete or roll back
+ continue
+ }
+ rollbackEntry := &TxnEntry{
+ Operation: PutOperation,
+ Entry: &Entry{
+ Key: entry.Key,
+ Value: entry.Value,
+ },
+ }
+ err = t.DeleteInternal(ctx, txn.Entry.Key)
+ if err != nil {
+ retErr = multierror.Append(retErr, err)
+ dirty = true
+ break TxnWalk
+ }
+ rollbackStack = append([]*TxnEntry{rollbackEntry}, rollbackStack...)
+
+ case PutOperation:
+ entry, err := t.GetInternal(ctx, txn.Entry.Key)
+ if err != nil {
+ retErr = multierror.Append(retErr, err)
+ dirty = true
+ break TxnWalk
+ }
+
+ // Nothing existed so in fact rolling back requires a delete
+ var rollbackEntry *TxnEntry
+ if entry == nil {
+ rollbackEntry = &TxnEntry{
+ Operation: DeleteOperation,
+ Entry: &Entry{
+ Key: txn.Entry.Key,
+ },
+ }
+ } else {
+ rollbackEntry = &TxnEntry{
+ Operation: PutOperation,
+ Entry: &Entry{
+ Key: entry.Key,
+ Value: entry.Value,
+ },
+ }
+ }
+
+ err = t.PutInternal(ctx, txn.Entry)
+ if err != nil {
+ retErr = multierror.Append(retErr, err)
+ dirty = true
+ break TxnWalk
+ }
+ rollbackStack = append([]*TxnEntry{rollbackEntry}, rollbackStack...)
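+ // The inverse entry is prepended so that a rollback replays the most
+ // recent successful operation first (LIFO order).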
+ } + } + + // Need to roll back because we hit an error along the way + if dirty { + // While traversing this, if we get an error, we continue anyways in + // best-effort fashion + for _, txn := range rollbackStack { + switch txn.Operation { + case DeleteOperation: + err := t.DeleteInternal(ctx, txn.Entry.Key) + if err != nil { + retErr = multierror.Append(retErr, err) + } + case PutOperation: + err := t.PutInternal(ctx, txn.Entry) + if err != nil { + retErr = multierror.Append(retErr, err) + } + } + } + } + + return +} diff --git a/vendor/github.com/hashicorp/vault/sdk/version/cgo.go b/vendor/github.com/hashicorp/vault/sdk/version/cgo.go new file mode 100644 index 00000000000..5bc93e5bfcd --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/version/cgo.go @@ -0,0 +1,7 @@ +//go:build cgo + +package version + +func init() { + CgoEnabled = true +} diff --git a/vendor/github.com/hashicorp/vault/sdk/version/version.go b/vendor/github.com/hashicorp/vault/sdk/version/version.go new file mode 100644 index 00000000000..78b8eb829cd --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/version/version.go @@ -0,0 +1,80 @@ +package version + +import ( + "bytes" + "fmt" +) + +// VersionInfo +type VersionInfo struct { + Revision string `json:"revision,omitempty"` + Version string `json:"version,omitempty"` + VersionPrerelease string `json:"version_prerelease,omitempty"` + VersionMetadata string `json:"version_metadata,omitempty"` + BuildDate string `json:"build_date,omitempty"` +} + +func GetVersion() *VersionInfo { + ver := Version + rel := VersionPrerelease + md := VersionMetadata + if GitDescribe != "" { + ver = GitDescribe + } + if GitDescribe == "" && rel == "" && VersionPrerelease != "" { + rel = "dev" + } + + return &VersionInfo{ + Revision: GitCommit, + Version: ver, + VersionPrerelease: rel, + VersionMetadata: md, + BuildDate: BuildDate, + } +} + +func (c *VersionInfo) VersionNumber() string { + if Version == "unknown" && VersionPrerelease == "unknown" { + return "(version unknown)" + } + + version := c.Version + + if c.VersionPrerelease != "" { + version = fmt.Sprintf("%s-%s", version, c.VersionPrerelease) + } + + if c.VersionMetadata != "" { + version = fmt.Sprintf("%s+%s", version, c.VersionMetadata) + } + + return version +} + +func (c *VersionInfo) FullVersionNumber(rev bool) string { + var versionString bytes.Buffer + + if Version == "unknown" && VersionPrerelease == "unknown" { + return "Vault (version unknown)" + } + + fmt.Fprintf(&versionString, "Vault v%s", c.Version) + if c.VersionPrerelease != "" { + fmt.Fprintf(&versionString, "-%s", c.VersionPrerelease) + } + + if c.VersionMetadata != "" { + fmt.Fprintf(&versionString, "+%s", c.VersionMetadata) + } + + if rev && c.Revision != "" { + fmt.Fprintf(&versionString, " (%s)", c.Revision) + } + + if c.BuildDate != "" { + fmt.Fprintf(&versionString, ", built %s", c.BuildDate) + } + + return versionString.String() +} diff --git a/vendor/github.com/hashicorp/vault/sdk/version/version_base.go b/vendor/github.com/hashicorp/vault/sdk/version/version_base.go new file mode 100644 index 00000000000..e45626e2cd8 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/version/version_base.go @@ -0,0 +1,17 @@ +package version + +var ( + // The git commit that was compiled. This will be filled in by the compiler. + GitCommit string + GitDescribe string + + // The compilation date. This will be filled in by the compiler. 
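+ // (In release builds these values are typically injected at link time
+ // via go build's -ldflags "-X" mechanism.)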
+ BuildDate string + + // Whether cgo is enabled or not; set at build time + CgoEnabled bool + + Version = "1.13.0" + VersionPrerelease = "dev1" + VersionMetadata = "" +) diff --git a/vendor/github.com/hashicorp/yamux/.gitignore b/vendor/github.com/hashicorp/yamux/.gitignore new file mode 100644 index 00000000000..836562412fe --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/hashicorp/yamux/LICENSE b/vendor/github.com/hashicorp/yamux/LICENSE new file mode 100644 index 00000000000..f0e5c79e181 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. 
"You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. 
You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. \ No newline at end of file diff --git a/vendor/github.com/hashicorp/yamux/README.md b/vendor/github.com/hashicorp/yamux/README.md new file mode 100644 index 00000000000..d4db7fc99be --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/README.md @@ -0,0 +1,86 @@ +# Yamux + +Yamux (Yet another Multiplexer) is a multiplexing library for Golang. +It relies on an underlying connection to provide reliability +and ordering, such as TCP or Unix domain sockets, and provides +stream-oriented multiplexing. It is inspired by SPDY but is not +interoperable with it. + +Yamux features include: + +* Bi-directional streams + * Streams can be opened by either client or server + * Useful for NAT traversal + * Server-side push support +* Flow control + * Avoid starvation + * Back-pressure to prevent overwhelming a receiver +* Keep Alives + * Enables persistent connections over a load balancer +* Efficient + * Enables thousands of logical streams with low overhead + +## Documentation + +For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/yamux). 
+ +## Specification + +The full specification for Yamux is provided in the `spec.md` file. +It can be used as a guide to implementors of interoperable libraries. + +## Usage + +Using Yamux is remarkably simple: + +```go + +func client() { + // Get a TCP connection + conn, err := net.Dial(...) + if err != nil { + panic(err) + } + + // Setup client side of yamux + session, err := yamux.Client(conn, nil) + if err != nil { + panic(err) + } + + // Open a new stream + stream, err := session.Open() + if err != nil { + panic(err) + } + + // Stream implements net.Conn + stream.Write([]byte("ping")) +} + +func server() { + // Accept a TCP connection + conn, err := listener.Accept() + if err != nil { + panic(err) + } + + // Setup server side of yamux + session, err := yamux.Server(conn, nil) + if err != nil { + panic(err) + } + + // Accept a stream + stream, err := session.Accept() + if err != nil { + panic(err) + } + + // Listen for a message + buf := make([]byte, 4) + stream.Read(buf) +} + +``` + diff --git a/vendor/github.com/hashicorp/yamux/addr.go b/vendor/github.com/hashicorp/yamux/addr.go new file mode 100644 index 00000000000..f6a00199cdd --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/addr.go @@ -0,0 +1,60 @@ +package yamux + +import ( + "fmt" + "net" +) + +// hasAddr is used to get the address from the underlying connection +type hasAddr interface { + LocalAddr() net.Addr + RemoteAddr() net.Addr +} + +// yamuxAddr is used when we cannot get the underlying address +type yamuxAddr struct { + Addr string +} + +func (*yamuxAddr) Network() string { + return "yamux" +} + +func (y *yamuxAddr) String() string { + return fmt.Sprintf("yamux:%s", y.Addr) +} + +// Addr is used to get the address of the listener. +func (s *Session) Addr() net.Addr { + return s.LocalAddr() +} + +// LocalAddr is used to get the local address of the +// underlying connection. 
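+// If the underlying connection does not expose addresses, a placeholder
+// yamux address is returned instead.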
+func (s *Session) LocalAddr() net.Addr {
+ addr, ok := s.conn.(hasAddr)
+ if !ok {
+ return &yamuxAddr{"local"}
+ }
+ return addr.LocalAddr()
+}
+
+// RemoteAddr is used to get the address of the remote end
+// of the underlying connection
+func (s *Session) RemoteAddr() net.Addr {
+ addr, ok := s.conn.(hasAddr)
+ if !ok {
+ return &yamuxAddr{"remote"}
+ }
+ return addr.RemoteAddr()
+}
+
+// LocalAddr returns the local address
+func (s *Stream) LocalAddr() net.Addr {
+ return s.session.LocalAddr()
+}
+
+// RemoteAddr returns the remote address
+func (s *Stream) RemoteAddr() net.Addr {
+ return s.session.RemoteAddr()
+}
diff --git a/vendor/github.com/hashicorp/yamux/const.go b/vendor/github.com/hashicorp/yamux/const.go
new file mode 100644
index 00000000000..2fdbf844a8e
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/const.go
@@ -0,0 +1,182 @@
+package yamux
+
+import (
+ "encoding/binary"
+ "fmt"
+)
+
+// NetError implements net.Error
+type NetError struct {
+ err error
+ timeout bool
+ temporary bool
+}
+
+func (e *NetError) Error() string {
+ return e.err.Error()
+}
+
+func (e *NetError) Timeout() bool {
+ return e.timeout
+}
+
+func (e *NetError) Temporary() bool {
+ return e.temporary
+}
+
+var (
+ // ErrInvalidVersion means we received a frame with an
+ // invalid version
+ ErrInvalidVersion = fmt.Errorf("invalid protocol version")
+
+ // ErrInvalidMsgType means we received a frame with an
+ // invalid message type
+ ErrInvalidMsgType = fmt.Errorf("invalid msg type")
+
+ // ErrSessionShutdown is used if there is a shutdown during
+ // an operation
+ ErrSessionShutdown = fmt.Errorf("session shutdown")
+
+ // ErrStreamsExhausted is returned if we have no more
+ // stream ids to issue
+ ErrStreamsExhausted = fmt.Errorf("streams exhausted")
+
+ // ErrDuplicateStream is used if a duplicate stream is
+ // opened inbound
+ ErrDuplicateStream = fmt.Errorf("duplicate stream initiated")
+
+ // ErrRecvWindowExceeded indicates the receive window was exceeded
+ ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded")
+
+ // ErrTimeout is used when we reach an IO deadline
+ ErrTimeout = &NetError{
+ err: fmt.Errorf("i/o deadline reached"),
+
+ // Error should meet net.Error interface for timeouts for compatibility
+ // with standard library expectations, such as http servers.
+ timeout: true,
+ }
+
+ // ErrStreamClosed is returned when using a closed stream
+ ErrStreamClosed = fmt.Errorf("stream closed")
+
+ // ErrUnexpectedFlag is set when we get an unexpected flag
+ ErrUnexpectedFlag = fmt.Errorf("unexpected flag")
+
+ // ErrRemoteGoAway is used when we get a go away from the other side
+ ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections")
+
+ // ErrConnectionReset is sent if a stream is reset. This can happen
+ // if the backlog is exceeded, or if there was a remote GoAway.
+ ErrConnectionReset = fmt.Errorf("connection reset")
+
+ // ErrConnectionWriteTimeout indicates that we hit the "safety valve"
+ // timeout writing to the underlying stream connection.
+ ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout")
+
+ // ErrKeepAliveTimeout is sent if a missed keepalive caused the stream to close
+ ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout")
+)
+
+const (
+ // protoVersion is the only version we support
+ protoVersion uint8 = 0
+)
+
+const (
+ // Data is used for data frames. They are followed
+ // by length bytes worth of payload.
+ typeData uint8 = iota
+
+ // WindowUpdate is used to change the window of
+ // a given stream. The length indicates the delta
+ // update to the window.
+ typeWindowUpdate
+
+ // Ping is sent as a keep-alive or to measure
+ // the RTT. The StreamID and Length values are echoed
+ // back in the response.
+ typePing
+
+ // GoAway is sent to terminate a session. The StreamID
+ // should be 0 and the length is an error code.
+ typeGoAway
+)
+
+const (
+ // SYN is sent to signal a new stream. May
+ // be sent with a data payload
+ flagSYN uint16 = 1 << iota
+
+ // ACK is sent to acknowledge a new stream. May
+ // be sent with a data payload
+ flagACK
+
+ // FIN is sent to half-close the given stream.
+ // May be sent with a data payload.
+ flagFIN
+
+ // RST is used to hard close a given stream.
+ flagRST
+)
+
+const (
+ // initialStreamWindow is the initial stream window size
+ initialStreamWindow uint32 = 256 * 1024
+)
+
+const (
+ // goAwayNormal is sent on a normal termination
+ goAwayNormal uint32 = iota
+
+ // goAwayProtoErr is sent on a protocol error
+ goAwayProtoErr
+
+ // goAwayInternalErr is sent on an internal error
+ goAwayInternalErr
+)
+
+const (
+ sizeOfVersion = 1
+ sizeOfType = 1
+ sizeOfFlags = 2
+ sizeOfStreamID = 4
+ sizeOfLength = 4
+ headerSize = sizeOfVersion + sizeOfType + sizeOfFlags +
+ sizeOfStreamID + sizeOfLength
+)
+
+type header []byte
+
+func (h header) Version() uint8 {
+ return h[0]
+}
+
+func (h header) MsgType() uint8 {
+ return h[1]
+}
+
+func (h header) Flags() uint16 {
+ return binary.BigEndian.Uint16(h[2:4])
+}
+
+func (h header) StreamID() uint32 {
+ return binary.BigEndian.Uint32(h[4:8])
+}
+
+func (h header) Length() uint32 {
+ return binary.BigEndian.Uint32(h[8:12])
+}
+
+func (h header) String() string {
+ return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d",
+ h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length())
+}
+
+func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) {
+ h[0] = protoVersion
+ h[1] = msgType
+ binary.BigEndian.PutUint16(h[2:4], flags)
+ binary.BigEndian.PutUint32(h[4:8], streamID)
+ binary.BigEndian.PutUint32(h[8:12], length)
+}
diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go
new file mode 100644
index 00000000000..0c3e67b022a
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/mux.go
@@ -0,0 +1,114 @@
+package yamux
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "time"
+)
+
+// Config is used to tune the Yamux session
+type Config struct {
+ // AcceptBacklog is used to limit how many streams may be
+ // waiting for an accept.
+ AcceptBacklog int
+
+ // EnableKeepAlive is used to send periodic keep-alive
+ // messages using a ping.
+ EnableKeepAlive bool
+
+ // KeepAliveInterval is how often to perform the keep alive
+ KeepAliveInterval time.Duration
+
+ // ConnectionWriteTimeout is meant to be a "safety valve" timeout after
+ // which we will suspect a problem with the underlying connection and
+ // close it. This is only applied to writes, where there's generally
+ // an expectation that things will move along quickly.
+ ConnectionWriteTimeout time.Duration
+
+ // MaxStreamWindowSize is used to control the maximum
+ // window size that we allow for a stream.
+ MaxStreamWindowSize uint32
+
+ // StreamOpenTimeout is the maximum amount of time that a stream will
+ // be allowed to remain in pending state while waiting for an ACK from the peer.
+ // Once the timeout is reached the session will be gracefully closed.
+ // A zero value disables the StreamOpenTimeout, allowing unbounded
+ // blocking on OpenStream calls.
+ StreamOpenTimeout time.Duration
+
+ // StreamCloseTimeout is the maximum time that a stream will be allowed
+ // to be in a half-closed state when `Close` is called before forcibly
+ // closing the connection. Forcibly closed connections will empty the
+ // receive buffer, drop any future packets received for that stream,
+ // and send an RST to the remote side.
+ StreamCloseTimeout time.Duration
+
+ // LogOutput is used to control the log destination. Either Logger or
+ // LogOutput can be set, not both.
+ LogOutput io.Writer
+
+ // Logger is used to pass in the logger to be used. Either Logger or
+ // LogOutput can be set, not both.
+ Logger *log.Logger
+}
+
+// DefaultConfig is used to return a default configuration
+func DefaultConfig() *Config {
+ return &Config{
+ AcceptBacklog: 256,
+ EnableKeepAlive: true,
+ KeepAliveInterval: 30 * time.Second,
+ ConnectionWriteTimeout: 10 * time.Second,
+ MaxStreamWindowSize: initialStreamWindow,
+ StreamCloseTimeout: 5 * time.Minute,
+ StreamOpenTimeout: 75 * time.Second,
+ LogOutput: os.Stderr,
+ }
+}
+
+// VerifyConfig is used to verify the sanity of configuration
+func VerifyConfig(config *Config) error {
+ if config.AcceptBacklog <= 0 {
+ return fmt.Errorf("backlog must be positive")
+ }
+ if config.KeepAliveInterval == 0 {
+ return fmt.Errorf("keep-alive interval must be positive")
+ }
+ if config.MaxStreamWindowSize < initialStreamWindow {
+ return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow)
+ }
+ if config.LogOutput != nil && config.Logger != nil {
+ return fmt.Errorf("both Logger and LogOutput may not be set, select one")
+ } else if config.LogOutput == nil && config.Logger == nil {
+ return fmt.Errorf("one of Logger or LogOutput must be set, select one")
+ }
+ return nil
+}
+
+// Server is used to initialize a new server-side connection.
+// There must be at most one server-side connection. If a nil config is
+// provided, the DefaultConfig will be used.
+func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) {
+ if config == nil {
+ config = DefaultConfig()
+ }
+ if err := VerifyConfig(config); err != nil {
+ return nil, err
+ }
+ return newSession(config, conn, false), nil
+}
+
+// Client is used to initialize a new client-side connection.
+// There must be at most one client-side connection.
+func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) {
+ if config == nil {
+ config = DefaultConfig()
+ }
+
+ if err := VerifyConfig(config); err != nil {
+ return nil, err
+ }
+ return newSession(config, conn, true), nil
+}
diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go
new file mode 100644
index 00000000000..38fe3ed1f06
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/session.go
@@ -0,0 +1,732 @@
+package yamux
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "math"
+ "net"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// Session is used to wrap a reliable ordered connection and to
+// multiplex it into multiple streams.
+type Session struct {
+ // remoteGoAway indicates the remote side does
+ // not want further connections. Must be first for alignment.
+ remoteGoAway int32
+
+ // localGoAway indicates that we should stop
+ // accepting further connections. Must be first for alignment.
+ localGoAway int32 + + // nextStreamID is the next stream we should + // send. This depends if we are a client/server. + nextStreamID uint32 + + // config holds our configuration + config *Config + + // logger is used for our logs + logger *log.Logger + + // conn is the underlying connection + conn io.ReadWriteCloser + + // bufRead is a buffered reader + bufRead *bufio.Reader + + // pings is used to track inflight pings + pings map[uint32]chan struct{} + pingID uint32 + pingLock sync.Mutex + + // streams maps a stream id to a stream, and inflight has an entry + // for any outgoing stream that has not yet been established. Both are + // protected by streamLock. + streams map[uint32]*Stream + inflight map[uint32]struct{} + streamLock sync.Mutex + + // synCh acts like a semaphore. It is sized to the AcceptBacklog which + // is assumed to be symmetric between the client and server. This allows + // the client to avoid exceeding the backlog and instead blocks the open. + synCh chan struct{} + + // acceptCh is used to pass ready streams to the client + acceptCh chan *Stream + + // sendCh is used to mark a stream as ready to send, + // or to send a header out directly. + sendCh chan *sendReady + + // recvDoneCh is closed when recv() exits to avoid a race + // between stream registration and stream shutdown + recvDoneCh chan struct{} + sendDoneCh chan struct{} + + // shutdown is used to safely close a session + shutdown bool + shutdownErr error + shutdownCh chan struct{} + shutdownLock sync.Mutex + shutdownErrLock sync.Mutex +} + +// sendReady is used to either mark a stream as ready +// or to directly send a header +type sendReady struct { + Hdr []byte + mu sync.Mutex // Protects Body from unsafe reads. + Body []byte + Err chan error +} + +// newSession is used to construct a new session +func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session { + logger := config.Logger + if logger == nil { + logger = log.New(config.LogOutput, "", log.LstdFlags) + } + + s := &Session{ + config: config, + logger: logger, + conn: conn, + bufRead: bufio.NewReader(conn), + pings: make(map[uint32]chan struct{}), + streams: make(map[uint32]*Stream), + inflight: make(map[uint32]struct{}), + synCh: make(chan struct{}, config.AcceptBacklog), + acceptCh: make(chan *Stream, config.AcceptBacklog), + sendCh: make(chan *sendReady, 64), + recvDoneCh: make(chan struct{}), + sendDoneCh: make(chan struct{}), + shutdownCh: make(chan struct{}), + } + if client { + s.nextStreamID = 1 + } else { + s.nextStreamID = 2 + } + go s.recv() + go s.send() + if config.EnableKeepAlive { + go s.keepalive() + } + return s +} + +// IsClosed does a safe check to see if we have shutdown +func (s *Session) IsClosed() bool { + select { + case <-s.shutdownCh: + return true + default: + return false + } +} + +// CloseChan returns a read-only channel which is closed as +// soon as the session is closed. 
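+// It can be used in a select statement as an alternative to polling IsClosed.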
+func (s *Session) CloseChan() <-chan struct{} { + return s.shutdownCh +} + +// NumStreams returns the number of currently open streams +func (s *Session) NumStreams() int { + s.streamLock.Lock() + num := len(s.streams) + s.streamLock.Unlock() + return num +} + +// Open is used to create a new stream as a net.Conn +func (s *Session) Open() (net.Conn, error) { + conn, err := s.OpenStream() + if err != nil { + return nil, err + } + return conn, nil +} + +// OpenStream is used to create a new stream +func (s *Session) OpenStream() (*Stream, error) { + if s.IsClosed() { + return nil, ErrSessionShutdown + } + if atomic.LoadInt32(&s.remoteGoAway) == 1 { + return nil, ErrRemoteGoAway + } + + // Block if we have too many inflight SYNs + select { + case s.synCh <- struct{}{}: + case <-s.shutdownCh: + return nil, ErrSessionShutdown + } + +GET_ID: + // Get an ID, and check for stream exhaustion + id := atomic.LoadUint32(&s.nextStreamID) + if id >= math.MaxUint32-1 { + return nil, ErrStreamsExhausted + } + if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) { + goto GET_ID + } + + // Register the stream + stream := newStream(s, id, streamInit) + s.streamLock.Lock() + s.streams[id] = stream + s.inflight[id] = struct{}{} + s.streamLock.Unlock() + + if s.config.StreamOpenTimeout > 0 { + go s.setOpenTimeout(stream) + } + + // Send the window update to create + if err := stream.sendWindowUpdate(); err != nil { + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: aborted stream open without inflight syn semaphore") + } + return nil, err + } + return stream, nil +} + +// setOpenTimeout implements a timeout for streams that are opened but not established. +// If the StreamOpenTimeout is exceeded we assume the peer is unable to ACK, +// and close the session. +// The number of running timers is bounded by the capacity of the synCh. +func (s *Session) setOpenTimeout(stream *Stream) { + timer := time.NewTimer(s.config.StreamOpenTimeout) + defer timer.Stop() + + select { + case <-stream.establishCh: + return + case <-s.shutdownCh: + return + case <-timer.C: + // Timeout reached while waiting for ACK. + // Close the session to force connection re-establishment. + s.logger.Printf("[ERR] yamux: aborted stream open (destination=%s): %v", s.RemoteAddr().String(), ErrTimeout.err) + s.Close() + } +} + +// Accept is used to block until the next available stream +// is ready to be accepted. +func (s *Session) Accept() (net.Conn, error) { + conn, err := s.AcceptStream() + if err != nil { + return nil, err + } + return conn, err +} + +// AcceptStream is used to block until the next available stream +// is ready to be accepted. +func (s *Session) AcceptStream() (*Stream, error) { + select { + case stream := <-s.acceptCh: + if err := stream.sendWindowUpdate(); err != nil { + return nil, err + } + return stream, nil + case <-s.shutdownCh: + return nil, s.shutdownErr + } +} + +// Close is used to close the session and all streams. +// Attempts to send a GoAway before closing the connection. 
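+// Close blocks until the receive and send loops have exited.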
+func (s *Session) Close() error { + s.shutdownLock.Lock() + defer s.shutdownLock.Unlock() + + if s.shutdown { + return nil + } + s.shutdown = true + + s.shutdownErrLock.Lock() + if s.shutdownErr == nil { + s.shutdownErr = ErrSessionShutdown + } + s.shutdownErrLock.Unlock() + + close(s.shutdownCh) + + s.conn.Close() + <-s.recvDoneCh + + s.streamLock.Lock() + defer s.streamLock.Unlock() + for _, stream := range s.streams { + stream.forceClose() + } + <-s.sendDoneCh + return nil +} + +// exitErr is used to handle an error that is causing the +// session to terminate. +func (s *Session) exitErr(err error) { + s.shutdownErrLock.Lock() + if s.shutdownErr == nil { + s.shutdownErr = err + } + s.shutdownErrLock.Unlock() + s.Close() +} + +// GoAway can be used to prevent accepting further +// connections. It does not close the underlying conn. +func (s *Session) GoAway() error { + return s.waitForSend(s.goAway(goAwayNormal), nil) +} + +// goAway is used to send a goAway message +func (s *Session) goAway(reason uint32) header { + atomic.SwapInt32(&s.localGoAway, 1) + hdr := header(make([]byte, headerSize)) + hdr.encode(typeGoAway, 0, 0, reason) + return hdr +} + +// Ping is used to measure the RTT response time +func (s *Session) Ping() (time.Duration, error) { + // Get a channel for the ping + ch := make(chan struct{}) + + // Get a new ping id, mark as pending + s.pingLock.Lock() + id := s.pingID + s.pingID++ + s.pings[id] = ch + s.pingLock.Unlock() + + // Send the ping request + hdr := header(make([]byte, headerSize)) + hdr.encode(typePing, flagSYN, 0, id) + if err := s.waitForSend(hdr, nil); err != nil { + return 0, err + } + + // Wait for a response + start := time.Now() + select { + case <-ch: + case <-time.After(s.config.ConnectionWriteTimeout): + s.pingLock.Lock() + delete(s.pings, id) // Ignore it if a response comes later. + s.pingLock.Unlock() + return 0, ErrTimeout + case <-s.shutdownCh: + return 0, ErrSessionShutdown + } + + // Compute the RTT + return time.Now().Sub(start), nil +} + +// keepalive is a long running goroutine that periodically does +// a ping to keep the connection alive. +func (s *Session) keepalive() { + for { + select { + case <-time.After(s.config.KeepAliveInterval): + _, err := s.Ping() + if err != nil { + if err != ErrSessionShutdown { + s.logger.Printf("[ERR] yamux: keepalive failed: %v", err) + s.exitErr(ErrKeepAliveTimeout) + } + return + } + case <-s.shutdownCh: + return + } + } +} + +// waitForSendErr waits to send a header, checking for a potential shutdown +func (s *Session) waitForSend(hdr header, body []byte) error { + errCh := make(chan error, 1) + return s.waitForSendErr(hdr, body, errCh) +} + +// waitForSendErr waits to send a header with optional data, checking for a +// potential shutdown. Since there's the expectation that sends can happen +// in a timely manner, we enforce the connection write timeout here. +func (s *Session) waitForSendErr(hdr header, body []byte, errCh chan error) error { + t := timerPool.Get() + timer := t.(*time.Timer) + timer.Reset(s.config.ConnectionWriteTimeout) + defer func() { + timer.Stop() + select { + case <-timer.C: + default: + } + timerPool.Put(t) + }() + + ready := &sendReady{Hdr: hdr, Body: body, Err: errCh} + select { + case s.sendCh <- ready: + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } + + bodyCopy := func() { + if body == nil { + return // A nil body is ignored. 
+ } + + // In the event of session shutdown or connection write timeout, + // we need to prevent `send` from reading the body buffer after + // returning from this function since the caller may re-use the + // underlying array. + ready.mu.Lock() + defer ready.mu.Unlock() + + if ready.Body == nil { + return // Body was already copied in `send`. + } + newBody := make([]byte, len(body)) + copy(newBody, body) + ready.Body = newBody + } + + select { + case err := <-errCh: + return err + case <-s.shutdownCh: + bodyCopy() + return ErrSessionShutdown + case <-timer.C: + bodyCopy() + return ErrConnectionWriteTimeout + } +} + +// sendNoWait does a send without waiting. Since there's the expectation that +// the send happens right here, we enforce the connection write timeout if we +// can't queue the header to be sent. +func (s *Session) sendNoWait(hdr header) error { + t := timerPool.Get() + timer := t.(*time.Timer) + timer.Reset(s.config.ConnectionWriteTimeout) + defer func() { + timer.Stop() + select { + case <-timer.C: + default: + } + timerPool.Put(t) + }() + + select { + case s.sendCh <- &sendReady{Hdr: hdr}: + return nil + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } +} + +// send is a long running goroutine that sends data +func (s *Session) send() { + if err := s.sendLoop(); err != nil { + s.exitErr(err) + } +} + +func (s *Session) sendLoop() error { + defer close(s.sendDoneCh) + var bodyBuf bytes.Buffer + for { + bodyBuf.Reset() + + select { + case ready := <-s.sendCh: + // Send a header if ready + if ready.Hdr != nil { + _, err := s.conn.Write(ready.Hdr) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write header: %v", err) + asyncSendErr(ready.Err, err) + return err + } + } + + ready.mu.Lock() + if ready.Body != nil { + // Copy the body into the buffer to avoid + // holding a mutex lock during the write. 
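+				// (bytes.Buffer.Write grows the buffer as needed and always
+				// returns a nil error; it panics with ErrTooLarge on overflow,
+				// so the error check below is purely defensive.)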
+ _, err := bodyBuf.Write(ready.Body) + if err != nil { + ready.Body = nil + ready.mu.Unlock() + s.logger.Printf("[ERR] yamux: Failed to copy body into buffer: %v", err) + asyncSendErr(ready.Err, err) + return err + } + ready.Body = nil + } + ready.mu.Unlock() + + if bodyBuf.Len() > 0 { + // Send data from a body if given + _, err := s.conn.Write(bodyBuf.Bytes()) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write body: %v", err) + asyncSendErr(ready.Err, err) + return err + } + } + + // No error, successful send + asyncSendErr(ready.Err, nil) + case <-s.shutdownCh: + return nil + } + } +} + +// recv is a long running goroutine that accepts new data +func (s *Session) recv() { + if err := s.recvLoop(); err != nil { + s.exitErr(err) + } +} + +// Ensure that the index of the handler (typeData/typeWindowUpdate/etc) matches the message type +var ( + handlers = []func(*Session, header) error{ + typeData: (*Session).handleStreamMessage, + typeWindowUpdate: (*Session).handleStreamMessage, + typePing: (*Session).handlePing, + typeGoAway: (*Session).handleGoAway, + } +) + +// recvLoop continues to receive data until a fatal error is encountered +func (s *Session) recvLoop() error { + defer close(s.recvDoneCh) + hdr := header(make([]byte, headerSize)) + for { + // Read the header + if _, err := io.ReadFull(s.bufRead, hdr); err != nil { + if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") { + s.logger.Printf("[ERR] yamux: Failed to read header: %v", err) + } + return err + } + + // Verify the version + if hdr.Version() != protoVersion { + s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version()) + return ErrInvalidVersion + } + + mt := hdr.MsgType() + if mt < typeData || mt > typeGoAway { + return ErrInvalidMsgType + } + + if err := handlers[mt](s, hdr); err != nil { + return err + } + } +} + +// handleStreamMessage handles either a data or window update frame +func (s *Session) handleStreamMessage(hdr header) error { + // Check for a new stream creation + id := hdr.StreamID() + flags := hdr.Flags() + if flags&flagSYN == flagSYN { + if err := s.incomingStream(id); err != nil { + return err + } + } + + // Get the stream + s.streamLock.Lock() + stream := s.streams[id] + s.streamLock.Unlock() + + // If we do not have a stream, likely we sent a RST + if stream == nil { + // Drain any data on the wire + if hdr.MsgType() == typeData && hdr.Length() > 0 { + s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id) + if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil { + s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err) + return nil + } + } else { + s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr) + } + return nil + } + + // Check if this is a window update + if hdr.MsgType() == typeWindowUpdate { + if err := stream.incrSendWindow(hdr, flags); err != nil { + if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { + s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) + } + return err + } + return nil + } + + // Read the new data + if err := stream.readData(hdr, flags, s.bufRead); err != nil { + if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { + s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) + } + return err + } + return nil +} + +// handlePing is invokde for a typePing frame +func (s *Session) handlePing(hdr header) error { + flags := hdr.Flags() + pingID := hdr.Length() 
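+	// The opaque ping identifier is carried in the Length field and is
+	// echoed back in the reply.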
+ + // Check if this is a query, respond back in a separate context so we + // don't interfere with the receiving thread blocking for the write. + if flags&flagSYN == flagSYN { + go func() { + hdr := header(make([]byte, headerSize)) + hdr.encode(typePing, flagACK, 0, pingID) + if err := s.sendNoWait(hdr); err != nil { + s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err) + } + }() + return nil + } + + // Handle a response + s.pingLock.Lock() + ch := s.pings[pingID] + if ch != nil { + delete(s.pings, pingID) + close(ch) + } + s.pingLock.Unlock() + return nil +} + +// handleGoAway is invokde for a typeGoAway frame +func (s *Session) handleGoAway(hdr header) error { + code := hdr.Length() + switch code { + case goAwayNormal: + atomic.SwapInt32(&s.remoteGoAway, 1) + case goAwayProtoErr: + s.logger.Printf("[ERR] yamux: received protocol error go away") + return fmt.Errorf("yamux protocol error") + case goAwayInternalErr: + s.logger.Printf("[ERR] yamux: received internal error go away") + return fmt.Errorf("remote yamux internal error") + default: + s.logger.Printf("[ERR] yamux: received unexpected go away") + return fmt.Errorf("unexpected go away received") + } + return nil +} + +// incomingStream is used to create a new incoming stream +func (s *Session) incomingStream(id uint32) error { + // Reject immediately if we are doing a go away + if atomic.LoadInt32(&s.localGoAway) == 1 { + hdr := header(make([]byte, headerSize)) + hdr.encode(typeWindowUpdate, flagRST, id, 0) + return s.sendNoWait(hdr) + } + + // Allocate a new stream + stream := newStream(s, id, streamSYNReceived) + + s.streamLock.Lock() + defer s.streamLock.Unlock() + + // Check if stream already exists + if _, ok := s.streams[id]; ok { + s.logger.Printf("[ERR] yamux: duplicate stream declared") + if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { + s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) + } + return ErrDuplicateStream + } + + // Register the stream + s.streams[id] = stream + + // Check if we've exceeded the backlog + select { + case s.acceptCh <- stream: + return nil + default: + // Backlog exceeded! RST the stream + s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset") + delete(s.streams, id) + hdr := header(make([]byte, headerSize)) + hdr.encode(typeWindowUpdate, flagRST, id, 0) + return s.sendNoWait(hdr) + } +} + +// closeStream is used to close a stream once both sides have +// issued a close. If there was an in-flight SYN and the stream +// was not yet established, then this will give the credit back. +func (s *Session) closeStream(id uint32) { + s.streamLock.Lock() + if _, ok := s.inflight[id]; ok { + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: SYN tracking out of sync") + } + } + delete(s.streams, id) + s.streamLock.Unlock() +} + +// establishStream is used to mark a stream that was in the +// SYN Sent state as established. 
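+// It also releases the inflight-SYN semaphore acquired in OpenStream.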
+func (s *Session) establishStream(id uint32) {
+	s.streamLock.Lock()
+	if _, ok := s.inflight[id]; ok {
+		delete(s.inflight, id)
+	} else {
+		s.logger.Printf("[ERR] yamux: established stream without inflight SYN (no tracking entry)")
+	}
+	select {
+	case <-s.synCh:
+	default:
+		s.logger.Printf("[ERR] yamux: established stream without inflight SYN (didn't have semaphore)")
+	}
+	s.streamLock.Unlock()
+}
diff --git a/vendor/github.com/hashicorp/yamux/spec.md b/vendor/github.com/hashicorp/yamux/spec.md
new file mode 100644
index 00000000000..183d797bdea
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/spec.md
@@ -0,0 +1,140 @@
+# Specification
+
+This document details the internal specification of Yamux. It serves
+both as a guide for implementing Yamux and as a reference for building
+alternative interoperable libraries.
+
+# Framing
+
+Yamux uses a streaming connection underneath, but imposes a message
+framing so that it can be shared between many logical streams. Each
+frame contains a header like:
+
+* Version (8 bits)
+* Type (8 bits)
+* Flags (16 bits)
+* StreamID (32 bits)
+* Length (32 bits)
+
+This means that each header has a 12 byte overhead.
+All fields are encoded in network order (big endian).
+Each field is described below:
+
+## Version Field
+
+The version field is used for future backward compatibility. At the
+current time, the field is always set to 0, to indicate the initial
+version.
+
+## Type Field
+
+The type field is used to switch the frame message type. The following
+message types are supported:
+
+* 0x0 Data - Used to transmit data. May transmit zero length payloads
+  depending on the flags.
+
+* 0x1 Window Update - Used to update the sender's receive window size.
+  This is used to implement per-session flow control.
+
+* 0x2 Ping - Used to measure RTT. It can also be used for heartbeats
+  and keep-alives over TCP.
+
+* 0x3 Go Away - Used to close a session.
+
+## Flag Field
+
+The flags field is used to provide additional information related
+to the message type. The following flags are supported:
+
+* 0x1 SYN - Signals the start of a new stream. May be sent with a data or
+  window update message. Also sent with a ping to indicate outbound.
+
+* 0x2 ACK - Acknowledges the start of a new stream. May be sent with a data
+  or window update message. Also sent with a ping to indicate response.
+
+* 0x4 FIN - Performs a half-close of a stream. May be sent with a data
+  message or window update.
+
+* 0x8 RST - Reset a stream immediately. May be sent with a data or
+  window update message.
+
+## StreamID Field
+
+The StreamID field is used to identify the logical stream the frame
+is addressing. The client side should use odd IDs, and the server side
+even IDs. This prevents any collisions. Additionally, the 0 ID is
+reserved to represent the session.
+
+Both Ping and Go Away messages should always use the 0 StreamID.
+
+## Length Field
+
+The meaning of the length field depends on the message type:
+
+* Data - provides the length of bytes following the header
+* Window update - provides a delta update to the window size
+* Ping - Contains an opaque value, echoed back
+* Go Away - Contains an error code
+
+# Message Flow
+
+There is no explicit connection setup, as Yamux relies on an underlying
+transport to be provided. However, there is a distinction between client
+and server side of the connection.
+
+## Opening a stream
+
+To open a stream, an initial data or window update frame is sent
+with a new StreamID; a sketch of building such a frame follows below.
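+As a concrete illustration of the framing above, here is a standalone
+sketch (not code from the library); the constant values mirror the
+tables in this spec, and the stream ID of 1 follows the odd/even rule:
+
+```go
+package main
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+func main() {
+	const (
+		version          = 0x0 // initial protocol version
+		typeWindowUpdate = 0x1 // frame type: window update
+		flagSYN          = 0x1 // signals a new stream
+	)
+	// Build the 12-byte header for an initial window-update frame
+	// that opens client stream 1.
+	var hdr [12]byte
+	hdr[0] = version
+	hdr[1] = typeWindowUpdate
+	binary.BigEndian.PutUint16(hdr[2:4], flagSYN)
+	binary.BigEndian.PutUint32(hdr[4:8], 1)  // client side uses odd stream IDs
+	binary.BigEndian.PutUint32(hdr[8:12], 0) // delta of 0: rely on the initial 256KB window
+	fmt.Printf("% x\n", hdr)
+}
+```
+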
+The SYN flag should be set to signal a new stream.
+
+The receiver must then reply with either a data or window update frame
+with the StreamID along with the ACK flag to accept the stream or with
+the RST flag to reject the stream.
+
+Because we are relying on the reliable stream underneath, a connection
+can begin sending data once the SYN flag is sent. The corresponding
+ACK does not need to be received. This is particularly well suited
+for an RPC system where a client wants to open a stream and immediately
+fire a request without waiting for the RTT of the ACK.
+
+This does introduce the possibility of a connection being rejected
+after data has been sent already. This is a slight semantic difference
+from TCP, where the connection cannot be refused after it is opened.
+Clients should be prepared to handle this by checking for an error
+that indicates a RST was received.
+
+## Closing a stream
+
+To close a stream, either side sends a data or window update frame
+along with the FIN flag. This does a half-close indicating the sender
+will send no further data.
+
+Once both sides have closed the connection, the stream is closed.
+
+Alternatively, if an error occurs, the RST flag can be used to
+hard close a stream immediately.
+
+## Flow Control
+
+Yamux initially starts each stream with a 256KB window size.
+There is no window size for the session.
+
+To prevent the streams from stalling, window update frames should be
+sent regularly. Yamux can be configured to provide a larger limit for
+window sizes. Both sides assume the initial 256KB window, but can
+immediately send a window update as part of the SYN/ACK indicating a
+larger window.
+
+Both sides should track the number of bytes sent in Data frames
+only, as only those bytes are tracked as part of the window size.
+
+## Session termination
+
+When a session is being terminated, the Go Away message should
+be sent. The Length should be set to one of the following to
+provide an error code:
+
+* 0x0 Normal termination
+* 0x1 Protocol error
+* 0x2 Internal error
diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go
new file mode 100644
index 00000000000..23d08fcc8da
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/stream.go
@@ -0,0 +1,544 @@
+package yamux
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+type streamState int
+
+const (
+	streamInit streamState = iota
+	streamSYNSent
+	streamSYNReceived
+	streamEstablished
+	streamLocalClose
+	streamRemoteClose
+	streamClosed
+	streamReset
+)
+
+// Stream is used to represent a logical stream
+// within a session.
+type Stream struct {
+	recvWindow uint32
+	sendWindow uint32
+
+	id      uint32
+	session *Session
+
+	state     streamState
+	stateLock sync.Mutex
+
+	recvBuf  *bytes.Buffer
+	recvLock sync.Mutex
+
+	controlHdr     header
+	controlErr     chan error
+	controlHdrLock sync.Mutex
+
+	sendHdr  header
+	sendErr  chan error
+	sendLock sync.Mutex
+
+	recvNotifyCh chan struct{}
+	sendNotifyCh chan struct{}
+
+	readDeadline  atomic.Value // time.Time
+	writeDeadline atomic.Value // time.Time
+
+	// establishCh is notified if the stream is established or being closed.
+	establishCh chan struct{}
+
+	// closeTimer is set with stateLock held to honor the StreamCloseTimeout
+	// setting on Session.
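+	// The timer is stopped again in processFlags once the close completes.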
+ closeTimer *time.Timer +} + +// newStream is used to construct a new stream within +// a given session for an ID +func newStream(session *Session, id uint32, state streamState) *Stream { + s := &Stream{ + id: id, + session: session, + state: state, + controlHdr: header(make([]byte, headerSize)), + controlErr: make(chan error, 1), + sendHdr: header(make([]byte, headerSize)), + sendErr: make(chan error, 1), + recvWindow: initialStreamWindow, + sendWindow: initialStreamWindow, + recvNotifyCh: make(chan struct{}, 1), + sendNotifyCh: make(chan struct{}, 1), + establishCh: make(chan struct{}, 1), + } + s.readDeadline.Store(time.Time{}) + s.writeDeadline.Store(time.Time{}) + return s +} + +// Session returns the associated stream session +func (s *Stream) Session() *Session { + return s.session +} + +// StreamID returns the ID of this stream +func (s *Stream) StreamID() uint32 { + return s.id +} + +// Read is used to read from the stream +func (s *Stream) Read(b []byte) (n int, err error) { + defer asyncNotify(s.recvNotifyCh) +START: + s.stateLock.Lock() + switch s.state { + case streamLocalClose: + fallthrough + case streamRemoteClose: + fallthrough + case streamClosed: + s.recvLock.Lock() + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.recvLock.Unlock() + s.stateLock.Unlock() + return 0, io.EOF + } + s.recvLock.Unlock() + case streamReset: + s.stateLock.Unlock() + return 0, ErrConnectionReset + } + s.stateLock.Unlock() + + // If there is no data available, block + s.recvLock.Lock() + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.recvLock.Unlock() + goto WAIT + } + + // Read any bytes + n, _ = s.recvBuf.Read(b) + s.recvLock.Unlock() + + // Send a window update potentially + err = s.sendWindowUpdate() + if err == ErrSessionShutdown { + err = nil + } + return n, err + +WAIT: + var timeout <-chan time.Time + var timer *time.Timer + readDeadline := s.readDeadline.Load().(time.Time) + if !readDeadline.IsZero() { + delay := readDeadline.Sub(time.Now()) + timer = time.NewTimer(delay) + timeout = timer.C + } + select { + case <-s.recvNotifyCh: + if timer != nil { + timer.Stop() + } + goto START + case <-timeout: + return 0, ErrTimeout + } +} + +// Write is used to write to the stream +func (s *Stream) Write(b []byte) (n int, err error) { + s.sendLock.Lock() + defer s.sendLock.Unlock() + total := 0 + for total < len(b) { + n, err := s.write(b[total:]) + total += n + if err != nil { + return total, err + } + } + return total, nil +} + +// write is used to write to the stream, may return on +// a short write. +func (s *Stream) write(b []byte) (n int, err error) { + var flags uint16 + var max uint32 + var body []byte +START: + s.stateLock.Lock() + switch s.state { + case streamLocalClose: + fallthrough + case streamClosed: + s.stateLock.Unlock() + return 0, ErrStreamClosed + case streamReset: + s.stateLock.Unlock() + return 0, ErrConnectionReset + } + s.stateLock.Unlock() + + // If there is no data available, block + window := atomic.LoadUint32(&s.sendWindow) + if window == 0 { + goto WAIT + } + + // Determine the flags if any + flags = s.sendFlags() + + // Send up to our send window + max = min(window, uint32(len(b))) + body = b[:max] + + // Send the header + s.sendHdr.encode(typeData, flags, s.id, max) + if err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil { + if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { + // Message left in ready queue, header re-use is unsafe. 
+ s.sendHdr = header(make([]byte, headerSize)) + } + return 0, err + } + + // Reduce our send window + atomic.AddUint32(&s.sendWindow, ^uint32(max-1)) + + // Unlock + return int(max), err + +WAIT: + var timeout <-chan time.Time + writeDeadline := s.writeDeadline.Load().(time.Time) + if !writeDeadline.IsZero() { + delay := writeDeadline.Sub(time.Now()) + timeout = time.After(delay) + } + select { + case <-s.sendNotifyCh: + goto START + case <-timeout: + return 0, ErrTimeout + } + return 0, nil +} + +// sendFlags determines any flags that are appropriate +// based on the current stream state +func (s *Stream) sendFlags() uint16 { + s.stateLock.Lock() + defer s.stateLock.Unlock() + var flags uint16 + switch s.state { + case streamInit: + flags |= flagSYN + s.state = streamSYNSent + case streamSYNReceived: + flags |= flagACK + s.state = streamEstablished + } + return flags +} + +// sendWindowUpdate potentially sends a window update enabling +// further writes to take place. Must be invoked with the lock. +func (s *Stream) sendWindowUpdate() error { + s.controlHdrLock.Lock() + defer s.controlHdrLock.Unlock() + + // Determine the delta update + max := s.session.config.MaxStreamWindowSize + var bufLen uint32 + s.recvLock.Lock() + if s.recvBuf != nil { + bufLen = uint32(s.recvBuf.Len()) + } + delta := (max - bufLen) - s.recvWindow + + // Determine the flags if any + flags := s.sendFlags() + + // Check if we can omit the update + if delta < (max/2) && flags == 0 { + s.recvLock.Unlock() + return nil + } + + // Update our window + s.recvWindow += delta + s.recvLock.Unlock() + + // Send the header + s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta) + if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { + // Message left in ready queue, header re-use is unsafe. + s.controlHdr = header(make([]byte, headerSize)) + } + return err + } + return nil +} + +// sendClose is used to send a FIN +func (s *Stream) sendClose() error { + s.controlHdrLock.Lock() + defer s.controlHdrLock.Unlock() + + flags := s.sendFlags() + flags |= flagFIN + s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0) + if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { + // Message left in ready queue, header re-use is unsafe. + s.controlHdr = header(make([]byte, headerSize)) + } + return err + } + return nil +} + +// Close is used to close the stream +func (s *Stream) Close() error { + closeStream := false + s.stateLock.Lock() + switch s.state { + // Opened means we need to signal a close + case streamSYNSent: + fallthrough + case streamSYNReceived: + fallthrough + case streamEstablished: + s.state = streamLocalClose + goto SEND_CLOSE + + case streamLocalClose: + case streamRemoteClose: + s.state = streamClosed + closeStream = true + goto SEND_CLOSE + + case streamClosed: + case streamReset: + default: + panic("unhandled state") + } + s.stateLock.Unlock() + return nil +SEND_CLOSE: + // This shouldn't happen (the more realistic scenario to cancel the + // timer is via processFlags) but just in case this ever happens, we + // cancel the timer to prevent dangling timers. + if s.closeTimer != nil { + s.closeTimer.Stop() + s.closeTimer = nil + } + + // If we have a StreamCloseTimeout set we start the timeout timer. 
+ // We do this only if we're not already closing the stream since that + // means this was a graceful close. + // + // This prevents memory leaks if one side (this side) closes and the + // remote side poorly behaves and never responds with a FIN to complete + // the close. After the specified timeout, we clean our resources up no + // matter what. + if !closeStream && s.session.config.StreamCloseTimeout > 0 { + s.closeTimer = time.AfterFunc( + s.session.config.StreamCloseTimeout, s.closeTimeout) + } + + s.stateLock.Unlock() + s.sendClose() + s.notifyWaiting() + if closeStream { + s.session.closeStream(s.id) + } + return nil +} + +// closeTimeout is called after StreamCloseTimeout during a close to +// close this stream. +func (s *Stream) closeTimeout() { + // Close our side forcibly + s.forceClose() + + // Free the stream from the session map + s.session.closeStream(s.id) + + // Send a RST so the remote side closes too. + s.sendLock.Lock() + defer s.sendLock.Unlock() + hdr := header(make([]byte, headerSize)) + hdr.encode(typeWindowUpdate, flagRST, s.id, 0) + s.session.sendNoWait(hdr) +} + +// forceClose is used for when the session is exiting +func (s *Stream) forceClose() { + s.stateLock.Lock() + s.state = streamClosed + s.stateLock.Unlock() + s.notifyWaiting() +} + +// processFlags is used to update the state of the stream +// based on set flags, if any. Lock must be held +func (s *Stream) processFlags(flags uint16) error { + s.stateLock.Lock() + defer s.stateLock.Unlock() + + // Close the stream without holding the state lock + closeStream := false + defer func() { + if closeStream { + if s.closeTimer != nil { + // Stop our close timeout timer since we gracefully closed + s.closeTimer.Stop() + } + + s.session.closeStream(s.id) + } + }() + + if flags&flagACK == flagACK { + if s.state == streamSYNSent { + s.state = streamEstablished + } + asyncNotify(s.establishCh) + s.session.establishStream(s.id) + } + if flags&flagFIN == flagFIN { + switch s.state { + case streamSYNSent: + fallthrough + case streamSYNReceived: + fallthrough + case streamEstablished: + s.state = streamRemoteClose + s.notifyWaiting() + case streamLocalClose: + s.state = streamClosed + closeStream = true + s.notifyWaiting() + default: + s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state) + return ErrUnexpectedFlag + } + } + if flags&flagRST == flagRST { + s.state = streamReset + closeStream = true + s.notifyWaiting() + } + return nil +} + +// notifyWaiting notifies all the waiting channels +func (s *Stream) notifyWaiting() { + asyncNotify(s.recvNotifyCh) + asyncNotify(s.sendNotifyCh) + asyncNotify(s.establishCh) +} + +// incrSendWindow updates the size of our send window +func (s *Stream) incrSendWindow(hdr header, flags uint16) error { + if err := s.processFlags(flags); err != nil { + return err + } + + // Increase window, unblock a sender + atomic.AddUint32(&s.sendWindow, hdr.Length()) + asyncNotify(s.sendNotifyCh) + return nil +} + +// readData is used to handle a data frame +func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error { + if err := s.processFlags(flags); err != nil { + return err + } + + // Check that our recv window is not exceeded + length := hdr.Length() + if length == 0 { + return nil + } + + // Wrap in a limited reader + conn = &io.LimitedReader{R: conn, N: int64(length)} + + // Copy into buffer + s.recvLock.Lock() + + if length > s.recvWindow { + s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, 
s.recvWindow, length)
+		s.recvLock.Unlock()
+		return ErrRecvWindowExceeded
+	}
+
+	if s.recvBuf == nil {
+		// Allocate the receive buffer just-in-time to fit the full data frame.
+		// This way we can read in the whole packet without further allocations.
+		s.recvBuf = bytes.NewBuffer(make([]byte, 0, length))
+	}
+	copiedLength, err := io.Copy(s.recvBuf, conn)
+	if err != nil {
+		s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err)
+		s.recvLock.Unlock()
+		return err
+	}
+
+	// Decrement the receive window
+	s.recvWindow -= uint32(copiedLength)
+	s.recvLock.Unlock()
+
+	// Unblock any readers
+	asyncNotify(s.recvNotifyCh)
+	return nil
+}
+
+// SetDeadline sets the read and write deadlines
+func (s *Stream) SetDeadline(t time.Time) error {
+	if err := s.SetReadDeadline(t); err != nil {
+		return err
+	}
+	if err := s.SetWriteDeadline(t); err != nil {
+		return err
+	}
+	return nil
+}
+
+// SetReadDeadline sets the deadline for blocked and future Read calls.
+func (s *Stream) SetReadDeadline(t time.Time) error {
+	s.readDeadline.Store(t)
+	asyncNotify(s.recvNotifyCh)
+	return nil
+}
+
+// SetWriteDeadline sets the deadline for blocked and future Write calls
+func (s *Stream) SetWriteDeadline(t time.Time) error {
+	s.writeDeadline.Store(t)
+	asyncNotify(s.sendNotifyCh)
+	return nil
+}
+
+// Shrink is used to compact the amount of buffers utilized.
+// This is useful when using Yamux in a connection pool to reduce
+// the idle memory utilization.
+func (s *Stream) Shrink() {
+	s.recvLock.Lock()
+	if s.recvBuf != nil && s.recvBuf.Len() == 0 {
+		s.recvBuf = nil
+	}
+	s.recvLock.Unlock()
+}
diff --git a/vendor/github.com/hashicorp/yamux/util.go b/vendor/github.com/hashicorp/yamux/util.go
new file mode 100644
index 00000000000..8a73e9249a6
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/util.go
@@ -0,0 +1,43 @@
+package yamux
+
+import (
+	"sync"
+	"time"
+)
+
+var (
+	timerPool = &sync.Pool{
+		New: func() interface{} {
+			timer := time.NewTimer(time.Hour * 1e6)
+			timer.Stop()
+			return timer
+		},
+	}
+)
+
+// asyncSendErr is used to try an async send of an error
+func asyncSendErr(ch chan error, err error) {
+	if ch == nil {
+		return
+	}
+	select {
+	case ch <- err:
+	default:
+	}
+}
+
+// asyncNotify is used to signal a waiting goroutine
+func asyncNotify(ch chan struct{}) {
+	select {
+	case ch <- struct{}{}:
+	default:
+	}
+}
+
+// min computes the minimum of two values
+func min(a, b uint32) uint32 {
+	if a < b {
+		return a
+	}
+	return b
+}
diff --git a/vendor/github.com/jellydator/ttlcache/v2/CHANGELOG.md b/vendor/github.com/jellydator/ttlcache/v2/CHANGELOG.md
new file mode 100644
index 00000000000..dbfa6202b91
--- /dev/null
+++ b/vendor/github.com/jellydator/ttlcache/v2/CHANGELOG.md
@@ -0,0 +1,94 @@
+# 2.11.0 (December 2021)
+
+#64: @DoubeDi added a method `GetItems` to retrieve all items in the cache. This method also triggers all callbacks associated with a normal `Get`.
+
+## API changes:
+
+// GetItems returns a copy of all items in the cache. Returns nil when the cache has been closed.
+func (cache *Cache) GetItems() map[string]interface{} {
+
+# 2.10.0 (December 2021)
+
+#62 : @nikhilk1701 found a memory leak where removed items are not directly eligible for garbage collection. There are no API changes.
+
+# 2.9.0 (October 2021)
+
+#55,#56,#57 : @chenyahui was on fire and greatly improved the performance of the library.
+He also got rid of the blocking call to expirationNotification, making the code run twice as fast in the benchmarks!
+
+# 2.8.1 (September 2021)
+
+#53 : Avoids recalculation of TTL value returned in API when TTL is extended, by @iczc
+
+# 2.8.0 (August 2021)
+
+#51 : The call GetWithTTL(key string) (interface{}, time.Duration, error) is added so that you can retrieve an item, and also know the remaining TTL. Thanks to @asgarciap for contributing.
+
+# 2.7.0 (June 2021)
+
+#46 : fixed a panic
+
+A panic occurred in a line that checks the maximum amount of items in the cache. While no definitive root cause has been found, there is indeed the possibility of crashing an empty cache if the cache limit is set to 'zero', which codes for infinite. This would lead to removal of the first item in the cache, which would panic on an empty cache.
+
+Fixed this by applying the global cache lock to all configuration options as well.
+
+# 2.6.0 (May 2021)
+
+#44 : There are no API changes, but a contribution was made to use https://pkg.go.dev/golang.org/x/sync/singleflight as a way to provide everybody waiting for a key with that key when it's fetched.
+
+This removes some complexity from the code and will make sure that all callers will get a return value even if there's high concurrency and low TTL (as proven by the test that was added).
+
+# 2.5.0 (May 2021)
+
+## API changes:
+
+* #39 : Allow custom loader function for each key via `GetByLoader`
+
+Introduce the `SimpleCache` interface for quick-start and basic usage.
+
+# 2.4.0 (April 2021)
+
+## API changes:
+
+* #42 : Add option to get list of keys
+* #40: Allow 'Touch' on items without other operation
+
+// Touch resets the TTL of the key when it exists, returns ErrNotFound if the key is not present.
+func (cache *Cache) Touch(key string) error
+
+// GetKeys returns all keys of items in the cache. Returns nil when the cache has been closed.
+func (cache *Cache) GetKeys() []string
+
+# 2.3.0 (February 2021)
+
+## API changes:
+
+* #38: Added func (cache *Cache) SetExpirationReasonCallback(callback ExpireReasonCallback). This function will replace SetExpirationCallback(..) in the next major version.
+
+# 2.2.0 (January 2021)
+
+## API changes:
+
+* #37 : a GetMetrics call is now available for some information on hits/misses etc.
+* #34 : Errors are now const
+
+# 2.1.0 (October 2020)
+
+## API changes
+
+* `SetCacheSizeLimit(limit int)` a call was contributed to set a cache limit. #35
+
+# 2.0.0 (July 2020)
+
+## Fixes #29, #30, #31
+
+## Behavioural changes
+
+* `Remove(key)` now also calls the expiration callback when it's set
+* `Count()` returns zero when the cache is closed
+
+## API changes
+
+* `SetLoaderFunction` allows you to provide a function to retrieve data on missing cache keys.
+* Operations that affect item behaviour such as `Close`, `Set`, `SetWithTTL`, `Get`, `Remove`, `Purge` now return an error with standard errors `ErrClosed` and `ErrNotFound` instead of a bool or nothing
+* `SkipTTLExtensionOnHit` replaces `SkipTtlExtensionOnHit` to satisfy golint
+* The callback types are now exported
diff --git a/vendor/github.com/jellydator/ttlcache/v2/LICENSE b/vendor/github.com/jellydator/ttlcache/v2/LICENSE
new file mode 100644
index 00000000000..f36a3b96780
--- /dev/null
+++ b/vendor/github.com/jellydator/ttlcache/v2/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2022 Jellydator
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/jellydator/ttlcache/v2/Readme.md b/vendor/github.com/jellydator/ttlcache/v2/Readme.md
new file mode 100644
index 00000000000..9c736cdbd03
--- /dev/null
+++ b/vendor/github.com/jellydator/ttlcache/v2/Readme.md
@@ -0,0 +1,145 @@
+# TTLCache - an in-memory cache with expiration
+
+**Although v2 of ttlcache is not yet deprecated, v3 should be used as it
+contains quite a few additions and improvements.**
+
+TTLCache is a simple key/value cache in golang with the following functions:
+
+1. Expiration of items based on time, or custom function
+2. Loader function to retrieve missing keys can be provided. Additional `Get` calls on the same key block while fetching is in progress (groupcache style).
+3. Individual expiring time or global expiring time, you can choose
+4. Auto-Extending expiration on `Get` -or- DNS style TTL, see `SkipTTLExtensionOnHit(bool)`
+5. Can trigger callback on key expiration
+6. Cleanup resources by calling `Close()` at end of lifecycle.
+7. Thread-safe with comprehensive testing suite. This code is in production at bol.com on critical systems.
+
+Note (issue #25): by default, due to historic reasons, the TTL will be reset on each cache hit and you need to explicitly configure the cache to use a TTL that will not get extended.
+
+## Usage
+
+`go get github.com/jellydator/ttlcache/v2`
+
+Each snippet below can be copied as a full standalone demo program. The first shows basic usage; the second exercises more of the cache's options.
+
+Basic:
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/jellydator/ttlcache/v2"
+)
+
+var notFound = ttlcache.ErrNotFound
+
+func main() {
+	var cache ttlcache.SimpleCache = ttlcache.NewCache()
+
+	cache.SetTTL(time.Duration(10 * time.Second))
+	cache.Set("MyKey", "MyValue")
+	cache.Set("MyNumber", 1000)
+
+	if val, err := cache.Get("MyKey"); err != notFound {
+		fmt.Printf("Got it: %s\n", val)
+	}
+
+	cache.Remove("MyNumber")
+	cache.Purge()
+	cache.Close()
+}
+```
+
+Advanced:
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/jellydator/ttlcache/v2"
+)
+
+var (
+	notFound = ttlcache.ErrNotFound
+	isClosed = ttlcache.ErrClosed
+)
+
+func main() {
+	newItemCallback := func(key string, value interface{}) {
+		fmt.Printf("New key(%s) added\n", key)
+	}
+	checkExpirationCallback := func(key string, value interface{}) bool {
+		if key == "key1" {
+			// if the key equals "key1", the value
+			// will not be allowed to expire
			return false
+		}
+		// all other values are allowed to expire
+		return true
+	}
+
+	expirationCallback := func(key string, reason ttlcache.EvictionReason, value interface{}) {
+		fmt.Printf("This key(%s) has expired because of %s\n", key, reason)
+	}
+
+	loaderFunction := func(key string) (data interface{}, ttl time.Duration, err error) {
+		ttl = time.Second * 300
+		data, err = getFromNetwork(key)
+
+		return data, ttl, err
+	}
+
+	cache := ttlcache.NewCache()
+	cache.SetTTL(time.Duration(10 * time.Second))
+	cache.SetExpirationReasonCallback(expirationCallback)
+	cache.SetLoaderFunction(loaderFunction)
+	cache.SetNewItemCallback(newItemCallback)
+	cache.SetCheckExpirationCallback(checkExpirationCallback)
+	cache.SetCacheSizeLimit(2)
+
+	cache.Set("key", "value")
+	cache.SetWithTTL("keyWithTTL", "value", 10*time.Second)
+
+	if value, exists := cache.Get("key"); exists == nil {
+		fmt.Printf("Got value: %v\n", value)
+	}
+	count := cache.Count()
+	if result := cache.Remove("keyNNN"); result == notFound {
+		fmt.Printf("Not found, %d items left\n", count)
+	}
+
+	cache.Set("key6", "value")
+	cache.Set("key7", "value")
+	metrics := cache.GetMetrics()
+	fmt.Printf("Total inserted: %d\n", metrics.Inserted)
+
+	cache.Close()
+
+}
+
+func getFromNetwork(key string) (string, error) {
+	time.Sleep(time.Millisecond * 30)
+	return "value", nil
+}
+```
+
+### TTLCache - Some design considerations
+
+1. The complexity of the current cache is already quite high. Therefore not all requests can be implemented in a straightforward manner.
+2. The locking should be done only in the exported functions and `startExpirationProcessing` of the Cache struct. Otherwise data races can occur or recursive locks are needed, which are both unwanted.
+3. I prefer correct functionality over fast tests. It's ok for new tests to take seconds to prove something.
+
+### Original Project
+
+TTLCache was forked from [wunderlist/ttlcache](https://github.com/wunderlist/ttlcache) to add extra functions not available in the original scope.
+The main differences are:
+
+1. An item can store any kind of object; previously, only strings could be saved
+2. Optionally, you can add callbacks too: check if a value should expire, be notified if a value expires, and be notified when new values are added to the cache
+3. The expiration can be either global or per item
+4. Items can exist without expiration time (time.Zero)
+5. Expirations and callbacks are realtime. There's no polling interval to check anymore; it's now done with a heap.
+6.
A cache count limiter diff --git a/vendor/github.com/jellydator/ttlcache/v2/cache.go b/vendor/github.com/jellydator/ttlcache/v2/cache.go new file mode 100644 index 00000000000..4b2f8201f8f --- /dev/null +++ b/vendor/github.com/jellydator/ttlcache/v2/cache.go @@ -0,0 +1,605 @@ +package ttlcache + +import ( + "sync" + "time" + + "golang.org/x/sync/singleflight" +) + +// CheckExpireCallback is used as a callback for an external check on item expiration +type CheckExpireCallback func(key string, value interface{}) bool + +// ExpireCallback is used as a callback on item expiration or when notifying of an item new to the cache +// Note that ExpireReasonCallback will be the successor of this function in the next major release. +type ExpireCallback func(key string, value interface{}) + +// ExpireReasonCallback is used as a callback on item expiration with extra information why the item expired. +type ExpireReasonCallback func(key string, reason EvictionReason, value interface{}) + +// LoaderFunction can be supplied to retrieve an item where a cache miss occurs. Supply an item specific ttl or Duration.Zero +type LoaderFunction func(key string) (data interface{}, ttl time.Duration, err error) + +// SimpleCache interface enables a quick-start. Interface for basic usage. +type SimpleCache interface { + Get(key string) (interface{}, error) + GetWithTTL(key string) (interface{}, time.Duration, error) + Set(key string, data interface{}) error + SetTTL(ttl time.Duration) error + SetWithTTL(key string, data interface{}, ttl time.Duration) error + Remove(key string) error + Close() error + Purge() error +} + +// Cache is a synchronized map of items that can auto-expire once stale +type Cache struct { + // mutex is shared for all operations that need to be safe + mutex sync.Mutex + // ttl is the global ttl for the cache, can be zero (is infinite) + ttl time.Duration + // actual item storage + items map[string]*item + // lock used to avoid fetching a remote item multiple times + loaderLock *singleflight.Group + expireCallback ExpireCallback + expireReasonCallback ExpireReasonCallback + checkExpireCallback CheckExpireCallback + newItemCallback ExpireCallback + // the queue is used to have an ordered structure to use for expiration and cleanup. + priorityQueue *priorityQueue + expirationNotification chan bool + // hasNotified is used to not schedule new expiration processing when an request is already pending. + hasNotified bool + expirationTime time.Time + skipTTLExtension bool + shutdownSignal chan (chan struct{}) + isShutDown bool + loaderFunction LoaderFunction + sizeLimit int + metrics Metrics +} + +// EvictionReason is an enum that explains why an item was evicted +type EvictionReason int + +const ( + // Removed : explicitly removed from cache via API call + Removed EvictionReason = iota + // EvictedSize : evicted due to exceeding the cache size + EvictedSize + // Expired : the time to live is zero and therefore the item is removed + Expired + // Closed : the cache was closed + Closed +) + +const ( + // ErrClosed is raised when operating on a cache where Close() has already been called. 
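+	// Repeated calls to Close also return this error.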
+ ErrClosed = constError("cache already closed") + // ErrNotFound indicates that the requested key is not present in the cache + ErrNotFound = constError("key not found") +) + +type constError string + +func (err constError) Error() string { + return string(err) +} + +func (cache *Cache) getItem(key string) (*item, bool, bool) { + item, exists := cache.items[key] + if !exists || item.expired() { + return nil, false, false + } + + // no need to change priority queue when skipTTLExtension is true or the item will not expire + if cache.skipTTLExtension || (item.ttl == 0 && cache.ttl == 0) { + return item, true, false + } + + if item.ttl == 0 { + item.ttl = cache.ttl + } + + item.touch() + + oldExpireTime := cache.priorityQueue.root().expireAt + cache.priorityQueue.update(item) + nowExpireTime := cache.priorityQueue.root().expireAt + + expirationNotification := false + + // notify expiration only if the latest expire time is changed + if (oldExpireTime.IsZero() && !nowExpireTime.IsZero()) || oldExpireTime.After(nowExpireTime) { + expirationNotification = true + } + return item, exists, expirationNotification +} + +func (cache *Cache) startExpirationProcessing() { + timer := time.NewTimer(time.Hour) + for { + var sleepTime time.Duration + cache.mutex.Lock() + cache.hasNotified = false + if cache.priorityQueue.Len() > 0 { + sleepTime = time.Until(cache.priorityQueue.root().expireAt) + if sleepTime < 0 && cache.priorityQueue.root().expireAt.IsZero() { + sleepTime = time.Hour + } else if sleepTime < 0 { + sleepTime = time.Microsecond + } + if cache.ttl > 0 { + sleepTime = min(sleepTime, cache.ttl) + } + + } else if cache.ttl > 0 { + sleepTime = cache.ttl + } else { + sleepTime = time.Hour + } + + cache.expirationTime = time.Now().Add(sleepTime) + cache.mutex.Unlock() + + timer.Reset(sleepTime) + select { + case shutdownFeedback := <-cache.shutdownSignal: + timer.Stop() + cache.mutex.Lock() + if cache.priorityQueue.Len() > 0 { + cache.evictjob(Closed) + } + cache.mutex.Unlock() + shutdownFeedback <- struct{}{} + return + case <-timer.C: + timer.Stop() + cache.mutex.Lock() + if cache.priorityQueue.Len() == 0 { + cache.mutex.Unlock() + continue + } + + cache.cleanjob() + cache.mutex.Unlock() + + case <-cache.expirationNotification: + timer.Stop() + continue + } + } +} + +func (cache *Cache) checkExpirationCallback(item *item, reason EvictionReason) { + if cache.expireCallback != nil { + go cache.expireCallback(item.key, item.data) + } + if cache.expireReasonCallback != nil { + go cache.expireReasonCallback(item.key, reason, item.data) + } +} + +func (cache *Cache) removeItem(item *item, reason EvictionReason) { + cache.metrics.Evicted++ + cache.checkExpirationCallback(item, reason) + cache.priorityQueue.remove(item) + delete(cache.items, item.key) +} + +func (cache *Cache) evictjob(reason EvictionReason) { + // index will only be advanced if the current entry will not be evicted + i := 0 + for item := cache.priorityQueue.items[i]; ; item = cache.priorityQueue.items[i] { + + cache.removeItem(item, reason) + if cache.priorityQueue.Len() == 0 { + return + } + } +} + +func (cache *Cache) cleanjob() { + // index will only be advanced if the current entry will not be evicted + i := 0 + for item := cache.priorityQueue.items[i]; item.expired(); item = cache.priorityQueue.items[i] { + + if cache.checkExpireCallback != nil { + if !cache.checkExpireCallback(item.key, item.data) { + item.touch() + cache.priorityQueue.update(item) + i++ + if i == cache.priorityQueue.Len() { + break + } + continue + } + } + + 
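+		// Reached when no check-expiration callback is set or the callback
+		// approved the expiry: evict the item and look at the next root.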
cache.removeItem(item, Expired) + if cache.priorityQueue.Len() == 0 { + return + } + } +} + +// Close calls Purge after stopping the goroutine that does ttl checking, for a clean shutdown. +// The cache is no longer cleaning up after the first call to Close, repeated calls are safe and return ErrClosed. +func (cache *Cache) Close() error { + cache.mutex.Lock() + if !cache.isShutDown { + cache.isShutDown = true + cache.mutex.Unlock() + feedback := make(chan struct{}) + cache.shutdownSignal <- feedback + <-feedback + close(cache.shutdownSignal) + cache.Purge() + } else { + cache.mutex.Unlock() + return ErrClosed + } + return nil +} + +// Set is a thread-safe way to add new items to the map. +func (cache *Cache) Set(key string, data interface{}) error { + return cache.SetWithTTL(key, data, ItemExpireWithGlobalTTL) +} + +// SetWithTTL is a thread-safe way to add new items to the map with individual ttl. +func (cache *Cache) SetWithTTL(key string, data interface{}, ttl time.Duration) error { + cache.mutex.Lock() + if cache.isShutDown { + cache.mutex.Unlock() + return ErrClosed + } + item, exists, _ := cache.getItem(key) + + oldExpireTime := time.Time{} + if !cache.priorityQueue.isEmpty() { + oldExpireTime = cache.priorityQueue.root().expireAt + } + + if exists { + item.data = data + item.ttl = ttl + } else { + if cache.sizeLimit != 0 && len(cache.items) >= cache.sizeLimit { + cache.removeItem(cache.priorityQueue.items[0], EvictedSize) + } + item = newItem(key, data, ttl) + cache.items[key] = item + } + cache.metrics.Inserted++ + + if item.ttl == 0 { + item.ttl = cache.ttl + } + + item.touch() + + if exists { + cache.priorityQueue.update(item) + } else { + cache.priorityQueue.push(item) + } + + nowExpireTime := cache.priorityQueue.root().expireAt + + cache.mutex.Unlock() + if !exists && cache.newItemCallback != nil { + cache.newItemCallback(key, data) + } + + // notify expiration only if the latest expire time is changed + if (oldExpireTime.IsZero() && !nowExpireTime.IsZero()) || oldExpireTime.After(nowExpireTime) { + cache.notifyExpiration() + } + return nil +} + +// Get is a thread-safe way to lookup items +// Every lookup, also touches the item, hence extending its life +func (cache *Cache) Get(key string) (interface{}, error) { + return cache.GetByLoader(key, nil) +} + +// GetWithTTL has exactly the same behaviour as Get but also returns +// the remaining TTL for a specific item at the moment its retrieved +func (cache *Cache) GetWithTTL(key string) (interface{}, time.Duration, error) { + return cache.GetByLoaderWithTtl(key, nil) +} + +// GetByLoader can take a per key loader function (i.e. to propagate context) +func (cache *Cache) GetByLoader(key string, customLoaderFunction LoaderFunction) (interface{}, error) { + dataToReturn, _, err := cache.GetByLoaderWithTtl(key, customLoaderFunction) + + return dataToReturn, err +} + +// GetByLoaderWithTtl can take a per key loader function (i.e. 
to propagate context) +func (cache *Cache) GetByLoaderWithTtl(key string, customLoaderFunction LoaderFunction) (interface{}, time.Duration, error) { + cache.mutex.Lock() + if cache.isShutDown { + cache.mutex.Unlock() + return nil, 0, ErrClosed + } + + cache.metrics.Hits++ + item, exists, triggerExpirationNotification := cache.getItem(key) + + var dataToReturn interface{} + ttlToReturn := time.Duration(0) + if exists { + cache.metrics.Retrievals++ + dataToReturn = item.data + if !cache.skipTTLExtension { + ttlToReturn = item.ttl + } else { + ttlToReturn = time.Until(item.expireAt) + } + if ttlToReturn < 0 { + ttlToReturn = 0 + } + } + + var err error + if !exists { + cache.metrics.Misses++ + err = ErrNotFound + } + + loaderFunction := cache.loaderFunction + if customLoaderFunction != nil { + loaderFunction = customLoaderFunction + } + + if loaderFunction == nil || exists { + cache.mutex.Unlock() + } + + if loaderFunction != nil && !exists { + type loaderResult struct { + data interface{} + ttl time.Duration + } + ch := cache.loaderLock.DoChan(key, func() (interface{}, error) { + // cache is not blocked during io + invokeData, ttl, err := cache.invokeLoader(key, loaderFunction) + lr := &loaderResult{ + data: invokeData, + ttl: ttl, + } + return lr, err + }) + cache.mutex.Unlock() + res := <-ch + dataToReturn = res.Val.(*loaderResult).data + ttlToReturn = res.Val.(*loaderResult).ttl + err = res.Err + } + + if triggerExpirationNotification { + cache.notifyExpiration() + } + + return dataToReturn, ttlToReturn, err +} + +func (cache *Cache) notifyExpiration() { + cache.mutex.Lock() + if cache.hasNotified { + cache.mutex.Unlock() + return + } + cache.hasNotified = true + cache.mutex.Unlock() + + cache.expirationNotification <- true +} + +func (cache *Cache) invokeLoader(key string, loaderFunction LoaderFunction) (dataToReturn interface{}, ttl time.Duration, err error) { + dataToReturn, ttl, err = loaderFunction(key) + if err == nil { + err = cache.SetWithTTL(key, dataToReturn, ttl) + if err != nil { + dataToReturn = nil + } + } + return dataToReturn, ttl, err +} + +// Remove removes an item from the cache if it exists, triggers expiration callback when set. Can return ErrNotFound if the entry was not present. +func (cache *Cache) Remove(key string) error { + cache.mutex.Lock() + defer cache.mutex.Unlock() + if cache.isShutDown { + return ErrClosed + } + + object, exists := cache.items[key] + if !exists { + return ErrNotFound + } + cache.removeItem(object, Removed) + + return nil +} + +// Count returns the number of items in the cache. Returns zero when the cache has been closed. +func (cache *Cache) Count() int { + cache.mutex.Lock() + defer cache.mutex.Unlock() + + if cache.isShutDown { + return 0 + } + length := len(cache.items) + return length +} + +// GetKeys returns all keys of items in the cache. Returns nil when the cache has been closed. +func (cache *Cache) GetKeys() []string { + cache.mutex.Lock() + defer cache.mutex.Unlock() + + if cache.isShutDown { + return nil + } + keys := make([]string, len(cache.items)) + i := 0 + for k := range cache.items { + keys[i] = k + i++ + } + return keys +} + +// GetItems returns a copy of all items in the cache. Returns nil when the cache has been closed. 
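+// The returned map is a shallow snapshot of the stored values, so the caller can use it without holding the cache lock.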
+func (cache *Cache) GetItems() map[string]interface{} { + cache.mutex.Lock() + defer cache.mutex.Unlock() + + if cache.isShutDown { + return nil + } + items := make(map[string]interface{}, len(cache.items)) + for k := range cache.items { + item, exists, _ := cache.getItem(k) + if exists { + items[k] = item.data + } + } + return items +} + +// SetTTL sets the global TTL value for items in the cache, which can be overridden at the item level. +func (cache *Cache) SetTTL(ttl time.Duration) error { + cache.mutex.Lock() + + if cache.isShutDown { + cache.mutex.Unlock() + return ErrClosed + } + cache.ttl = ttl + cache.mutex.Unlock() + cache.notifyExpiration() + return nil +} + +// SetExpirationCallback sets a callback that will be called when an item expires +func (cache *Cache) SetExpirationCallback(callback ExpireCallback) { + cache.mutex.Lock() + defer cache.mutex.Unlock() + cache.expireCallback = callback +} + +// SetExpirationReasonCallback sets a callback that will be called when an item expires, includes reason of expiry +func (cache *Cache) SetExpirationReasonCallback(callback ExpireReasonCallback) { + cache.mutex.Lock() + defer cache.mutex.Unlock() + cache.expireReasonCallback = callback +} + +// SetCheckExpirationCallback sets a callback that will be called when an item is about to expire +// in order to allow external code to decide whether the item expires or remains for another TTL cycle +func (cache *Cache) SetCheckExpirationCallback(callback CheckExpireCallback) { + cache.mutex.Lock() + defer cache.mutex.Unlock() + cache.checkExpireCallback = callback +} + +// SetNewItemCallback sets a callback that will be called when a new item is added to the cache +func (cache *Cache) SetNewItemCallback(callback ExpireCallback) { + cache.mutex.Lock() + defer cache.mutex.Unlock() + cache.newItemCallback = callback +} + +// SkipTTLExtensionOnHit allows the user to change the cache behaviour. When this flag is set to true it will +// no longer extend TTL of items when they are retrieved using Get, or when their expiration condition is evaluated +// using SetCheckExpirationCallback. +func (cache *Cache) SkipTTLExtensionOnHit(value bool) { + cache.mutex.Lock() + defer cache.mutex.Unlock() + cache.skipTTLExtension = value +} + +// SetLoaderFunction allows you to set a function to retrieve cache misses. The signature matches that of the Get function. +// Additional Get calls on the same key block while fetching is in progress (groupcache style). +func (cache *Cache) SetLoaderFunction(loader LoaderFunction) { + cache.mutex.Lock() + defer cache.mutex.Unlock() + cache.loaderFunction = loader +} + +// Purge will remove all entries +func (cache *Cache) Purge() error { + cache.mutex.Lock() + defer cache.mutex.Unlock() + if cache.isShutDown { + return ErrClosed + } + cache.metrics.Evicted += int64(len(cache.items)) + cache.items = make(map[string]*item) + cache.priorityQueue = newPriorityQueue() + return nil +} + +// SetCacheSizeLimit sets a limit to the amount of cached items. 
+// If a new item is getting cached, the item closest to timing out will be replaced
+// Set to 0 to turn off
+func (cache *Cache) SetCacheSizeLimit(limit int) {
+ cache.mutex.Lock()
+ defer cache.mutex.Unlock()
+ cache.sizeLimit = limit
+}
+
+// NewCache is a helper to create an instance of the Cache struct
+func NewCache() *Cache {
+
+ shutdownChan := make(chan chan struct{})
+
+ cache := &Cache{
+ items: make(map[string]*item),
+ loaderLock: &singleflight.Group{},
+ priorityQueue: newPriorityQueue(),
+ expirationNotification: make(chan bool, 1),
+ expirationTime: time.Now(),
+ shutdownSignal: shutdownChan,
+ isShutDown: false,
+ loaderFunction: nil,
+ sizeLimit: 0,
+ metrics: Metrics{},
+ }
+ go cache.startExpirationProcessing()
+ return cache
+}
+
+// GetMetrics exposes the metrics of the cache. This is a snapshot copy of the metrics.
+func (cache *Cache) GetMetrics() Metrics {
+ cache.mutex.Lock()
+ defer cache.mutex.Unlock()
+ return cache.metrics
+}
+
+// Touch resets the TTL of the key when it exists, returns ErrNotFound if the key is not present.
+func (cache *Cache) Touch(key string) error {
+ cache.mutex.Lock()
+ defer cache.mutex.Unlock()
+ item, exists := cache.items[key]
+ if !exists {
+ return ErrNotFound
+ }
+ item.touch()
+ return nil
+}
+
+func min(duration time.Duration, second time.Duration) time.Duration {
+ if duration < second {
+ return duration
+ }
+ return second
+}
diff --git a/vendor/github.com/jellydator/ttlcache/v2/evictionreason_enumer.go b/vendor/github.com/jellydator/ttlcache/v2/evictionreason_enumer.go
new file mode 100644
index 00000000000..dcff95d647f
--- /dev/null
+++ b/vendor/github.com/jellydator/ttlcache/v2/evictionreason_enumer.go
@@ -0,0 +1,52 @@
+// Code generated by "enumer -type EvictionReason"; DO NOT EDIT.
+
+//
+package ttlcache
+
+import (
+ "fmt"
+)
+
+const _EvictionReasonName = "RemovedEvictedSizeExpiredClosed"
+
+var _EvictionReasonIndex = [...]uint8{0, 7, 18, 25, 31}
+
+func (i EvictionReason) String() string {
+ if i < 0 || i >= EvictionReason(len(_EvictionReasonIndex)-1) {
+ return fmt.Sprintf("EvictionReason(%d)", i)
+ }
+ return _EvictionReasonName[_EvictionReasonIndex[i]:_EvictionReasonIndex[i+1]]
+}
+
+var _EvictionReasonValues = []EvictionReason{0, 1, 2, 3}
+
+var _EvictionReasonNameToValueMap = map[string]EvictionReason{
+ _EvictionReasonName[0:7]: 0,
+ _EvictionReasonName[7:18]: 1,
+ _EvictionReasonName[18:25]: 2,
+ _EvictionReasonName[25:31]: 3,
+}
+
+// EvictionReasonString retrieves an enum value from the enum constants string name.
+// Throws an error if the param is not part of the enum.
+func EvictionReasonString(s string) (EvictionReason, error) {
+ if val, ok := _EvictionReasonNameToValueMap[s]; ok {
+ return val, nil
+ }
+ return 0, fmt.Errorf("%s does not belong to EvictionReason values", s)
+}
+
+// EvictionReasonValues returns all values of the enum
+func EvictionReasonValues() []EvictionReason {
+ return _EvictionReasonValues
+}
+
+// IsAEvictionReason returns "true" if the value is listed in the enum definition. 
"false" otherwise +func (i EvictionReason) IsAEvictionReason() bool { + for _, v := range _EvictionReasonValues { + if i == v { + return true + } + } + return false +} diff --git a/vendor/github.com/jellydator/ttlcache/v2/item.go b/vendor/github.com/jellydator/ttlcache/v2/item.go new file mode 100644 index 00000000000..2f78f49ccc9 --- /dev/null +++ b/vendor/github.com/jellydator/ttlcache/v2/item.go @@ -0,0 +1,46 @@ +package ttlcache + +import ( + "time" +) + +const ( + // ItemNotExpire Will avoid the item being expired by TTL, but can still be exired by callback etc. + ItemNotExpire time.Duration = -1 + // ItemExpireWithGlobalTTL will use the global TTL when set. + ItemExpireWithGlobalTTL time.Duration = 0 +) + +func newItem(key string, data interface{}, ttl time.Duration) *item { + item := &item{ + data: data, + ttl: ttl, + key: key, + } + // since nobody is aware yet of this item, it's safe to touch without lock here + item.touch() + return item +} + +type item struct { + key string + data interface{} + ttl time.Duration + expireAt time.Time + queueIndex int +} + +// Reset the item expiration time +func (item *item) touch() { + if item.ttl > 0 { + item.expireAt = time.Now().Add(item.ttl) + } +} + +// Verify if the item is expired +func (item *item) expired() bool { + if item.ttl <= 0 { + return false + } + return item.expireAt.Before(time.Now()) +} diff --git a/vendor/github.com/jellydator/ttlcache/v2/metrics.go b/vendor/github.com/jellydator/ttlcache/v2/metrics.go new file mode 100644 index 00000000000..5f672b12d0b --- /dev/null +++ b/vendor/github.com/jellydator/ttlcache/v2/metrics.go @@ -0,0 +1,15 @@ +package ttlcache + +// Metrics contains common cache metrics so you can calculate hit and miss rates +type Metrics struct { + // succesful inserts + Inserted int64 + // retrieval attempts + Retrievals int64 + // all get calls that were in the cache (excludes loader invocations) + Hits int64 + // entries not in cache (includes loader invocations) + Misses int64 + // items removed from the cache in any way + Evicted int64 +} diff --git a/vendor/github.com/jellydator/ttlcache/v2/priority_queue.go b/vendor/github.com/jellydator/ttlcache/v2/priority_queue.go new file mode 100644 index 00000000000..5d40548863c --- /dev/null +++ b/vendor/github.com/jellydator/ttlcache/v2/priority_queue.go @@ -0,0 +1,85 @@ +package ttlcache + +import ( + "container/heap" +) + +func newPriorityQueue() *priorityQueue { + queue := &priorityQueue{} + heap.Init(queue) + return queue +} + +type priorityQueue struct { + items []*item +} + +func (pq *priorityQueue) isEmpty() bool { + return len(pq.items) == 0 +} + +func (pq *priorityQueue) root() *item { + if len(pq.items) == 0 { + return nil + } + + return pq.items[0] +} + +func (pq *priorityQueue) update(item *item) { + heap.Fix(pq, item.queueIndex) +} + +func (pq *priorityQueue) push(item *item) { + heap.Push(pq, item) +} + +func (pq *priorityQueue) pop() *item { + if pq.Len() == 0 { + return nil + } + return heap.Pop(pq).(*item) +} + +func (pq *priorityQueue) remove(item *item) { + heap.Remove(pq, item.queueIndex) +} + +func (pq priorityQueue) Len() int { + length := len(pq.items) + return length +} + +// Less will consider items with time.Time default value (epoch start) as more than set items. 
+func (pq priorityQueue) Less(i, j int) bool { + if pq.items[i].expireAt.IsZero() { + return false + } + if pq.items[j].expireAt.IsZero() { + return true + } + return pq.items[i].expireAt.Before(pq.items[j].expireAt) +} + +func (pq priorityQueue) Swap(i, j int) { + pq.items[i], pq.items[j] = pq.items[j], pq.items[i] + pq.items[i].queueIndex = i + pq.items[j].queueIndex = j +} + +func (pq *priorityQueue) Push(x interface{}) { + item := x.(*item) + item.queueIndex = len(pq.items) + pq.items = append(pq.items, item) +} + +func (pq *priorityQueue) Pop() interface{} { + old := pq.items + n := len(old) + item := old[n-1] + item.queueIndex = -1 + // de-reference the element to be popped for Garbage Collector to de-allocate the memory + old[n-1] = nil + pq.items = old[0 : n-1] + return item +} diff --git a/vendor/github.com/mattn/go-colorable/LICENSE b/vendor/github.com/mattn/go-colorable/LICENSE new file mode 100644 index 00000000000..91b5cef30eb --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md new file mode 100644 index 00000000000..ca0483711c9 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/README.md @@ -0,0 +1,48 @@ +# go-colorable + +[![Build Status](https://github.com/mattn/go-colorable/workflows/test/badge.svg)](https://github.com/mattn/go-colorable/actions?query=workflow%3Atest) +[![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable) +[![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable) + +Colorable writer for windows. + +For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.) +This package is possible to handle escape sequence for ansi color on windows. + +## Too Bad! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png) + + +## So Good! 
+ +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) + +## Usage + +```go +logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) +logrus.SetOutput(colorable.NewColorableStdout()) + +logrus.Info("succeeded") +logrus.Warn("not correct") +logrus.Error("something error") +logrus.Fatal("panic") +``` + +You can compile above code on non-windows OSs. + +## Installation + +``` +$ go get github.com/mattn/go-colorable +``` + +# License + +MIT + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go new file mode 100644 index 00000000000..416d1bbbf83 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go @@ -0,0 +1,38 @@ +//go:build appengine +// +build appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable returns new instance of Writer which handles escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} + +// EnableColorsStdout enable colors if possible. +func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go new file mode 100644 index 00000000000..766d94603ac --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -0,0 +1,38 @@ +//go:build !windows && !appengine +// +build !windows,!appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable returns new instance of Writer which handles escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} + +// EnableColorsStdout enable colors if possible. 
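+// On non-Windows, non-appengine builds ANSI sequences are assumed to work already, so this only reports success and returns a no-op restore function.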
+func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go new file mode 100644 index 00000000000..1846ad5ab41 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -0,0 +1,1047 @@ +//go:build windows && !appengine +// +build windows,!appengine + +package colorable + +import ( + "bytes" + "io" + "math" + "os" + "strconv" + "strings" + "sync" + "syscall" + "unsafe" + + "github.com/mattn/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) + commonLvbUnderscore = 0x8000 + + cENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 +) + +const ( + genericRead = 0x80000000 + genericWrite = 0x40000000 +) + +const ( + consoleTextmodeBuffer = 0x1 +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type consoleCursorInfo struct { + size dword + visible int32 +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") + procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") + procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") + procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") +) + +// Writer provides colorable Writer to the console +type Writer struct { + out io.Writer + handle syscall.Handle + althandle syscall.Handle + oldattr word + oldpos coord + rest bytes.Buffer + mutex sync.Mutex +} + +// NewColorable returns new instance of Writer which handles escape sequence from File. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var mode uint32 + if r, _, _ := procGetConsoleMode.Call(file.Fd(), uintptr(unsafe.Pointer(&mode))); r != 0 && mode&cENABLE_VIRTUAL_TERMINAL_PROCESSING != 0 { + return file + } + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} + } + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. 
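+// When stdout is not a console, or the console already has virtual terminal processing enabled, the *os.File is returned unchanged.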
+func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 
199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +// `\033]0;TITLESTR\007` +func doTitleSequence(er *bytes.Reader) error { + var c byte + var err error + + c, err = er.ReadByte() + if err != nil { + return err + } + if c != '0' && c != '2' { + return nil + } + c, err = er.ReadByte() + if err != nil { + return err + } + if c != ';' { + return nil + } + title := make([]byte, 0, 80) + for { + c, err = er.ReadByte() + if err != nil { + return err + } + if c == 0x07 || c == '\n' { + break + } + title = append(title, c) + } + if len(title) > 0 { + title8, err := syscall.UTF16PtrFromString(string(title)) + if err == nil { + procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8))) + } + } + return nil +} + +// returns Atoi(s) unless s == "" in which case it returns def +func atoiWithDefault(s string, def int) (int, error) { + if s == "" { + return def, nil + } + return strconv.Atoi(s) +} + +// Write writes data on console +func (w *Writer) Write(data []byte) (n int, err error) { + w.mutex.Lock() + defer w.mutex.Unlock() + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + handle := w.handle + + var er *bytes.Reader + if w.rest.Len() > 0 { + var rest bytes.Buffer + w.rest.WriteTo(&rest) + w.rest.Reset() + rest.Write(data) + er = bytes.NewReader(rest.Bytes()) + } else { + er = bytes.NewReader(data) + } + var plaintext bytes.Buffer +loop: + for { + c1, err := er.ReadByte() + if err != nil { + plaintext.WriteTo(w.out) + break loop + } + if c1 != 0x1b { + plaintext.WriteByte(c1) + continue + } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + + switch c2 { + case '>': + continue + case ']': + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 { + break loop + } + er = bytes.NewReader(w.rest.Bytes()[2:]) + err := doTitleSequence(er) + if err != nil { + break loop + } + w.rest.Reset() + continue + // https://github.com/mattn/go-colorable/issues/27 + case '7': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + continue + case '8': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + continue + case 0x5b: + // execute part after switch + default: + continue + } + + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + + var buf bytes.Buffer + var m byte + for i, c := range w.rest.Bytes()[2:] { + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') 
|| c == '@' { + m = c + er = bytes.NewReader(w.rest.Bytes()[2+i+1:]) + w.rest.Reset() + break + } + buf.Write([]byte(string(c))) + } + if m == 0 { + break loop + } + + switch m { + case 'A': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + if csbi.cursorPosition.x < 0 { + csbi.cursorPosition.x = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n < 1 { + n = 1 + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n - 1) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H', 'f': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + if buf.Len() > 0 { + token := strings.Split(buf.String(), ";") + switch len(token) { + case 1: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + csbi.cursorPosition.y = short(n1 - 1) + case 2: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2 - 1) + csbi.cursorPosition.y = short(n1 - 1) + } + } else { + csbi.cursorPosition.y = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + var count, written dword + var cursor coord + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + 
count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.window.top-csbi.cursorPosition.y)*dword(csbi.size.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var count, written dword + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'X': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var written dword + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i++ { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case n == 4: + attr |= commonLvbUnderscore + case (1 <= n && n <= 3) || n == 5: + attr |= foregroundIntensity + case n == 7 || n == 27: + attr = + (attr &^ (foregroundMask | backgroundMask)) | + ((attr & foregroundMask) << 4) | + ((attr & backgroundMask) >> 4) + case n == 22: + attr &^= foregroundIntensity + case n == 24: + attr &^= commonLvbUnderscore + case 30 <= n && n <= 37: + attr &= backgroundMask + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. 
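+ // 38;5;N selects a 256-color palette entry; 38;2;R;G;B approximates a 24-bit color by thresholding each channel at 127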
+ if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256%len(n256foreAttr)] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= foregroundRed + } + if g > 127 { + attr |= foregroundGreen + } + if b > 127 { + attr |= foregroundBlue + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr &= foregroundMask + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256%len(n256backAttr)] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= backgroundRed + } + if g > 127 { + attr |= backgroundGreen + } + if b > 127 { + attr |= backgroundBlue + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. + attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr)) + } + } + case 'h': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle == 0 { + h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0) + w.althandle = syscall.Handle(h) + if w.althandle != 0 { + handle = w.althandle + } + } + } + case 'l': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle != 0 { + syscall.CloseHandle(w.althandle) + w.althandle = 0 + handle = w.handle 
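+ // the alternate screen buffer is gone; route output back to the primary console handle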
+ } + } + case 's': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + case 'u': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + } + } + + return len(data), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + {0x000000, false, false, false, false}, + {0x000080, false, false, true, false}, + {0x008000, false, true, false, false}, + {0x008080, false, true, true, false}, + {0x800000, true, false, false, false}, + {0x800080, true, false, true, false}, + {0x808000, true, true, false, false}, + {0xc0c0c0, true, true, true, false}, + {0x808080, false, false, false, true}, + {0x0000ff, false, false, true, true}, + {0x00ff00, false, true, false, true}, + {0x00ffff, false, true, true, true}, + {0xff0000, true, false, false, true}, + {0xff00ff, true, false, true, true}, + {0xffff00, true, true, false, true}, + {0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} + +// EnableColorsStdout enable colors if possible. 
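+// On Windows this tries to enable virtual terminal processing on stdout and returns a function that restores the previous console mode.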
+func EnableColorsStdout(enabled *bool) func() { + var mode uint32 + h := os.Stdout.Fd() + if r, _, _ := procGetConsoleMode.Call(h, uintptr(unsafe.Pointer(&mode))); r != 0 { + if r, _, _ = procSetConsoleMode.Call(h, uintptr(mode|cENABLE_VIRTUAL_TERMINAL_PROCESSING)); r != 0 { + if enabled != nil { + *enabled = true + } + return func() { + procSetConsoleMode.Call(h, uintptr(mode)) + } + } + } + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/go.test.sh b/vendor/github.com/mattn/go-colorable/go.test.sh new file mode 100644 index 00000000000..012162b077c --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go new file mode 100644 index 00000000000..05d6f74bf6b --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -0,0 +1,57 @@ +package colorable + +import ( + "bytes" + "io" +) + +// NonColorable holds writer but removes escape sequence. +type NonColorable struct { + out io.Writer +} + +// NewNonColorable returns new instance of Writer which removes escape sequence from Writer. +func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +// Write writes data on console +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewReader(data) + var plaintext bytes.Buffer +loop: + for { + c1, err := er.ReadByte() + if err != nil { + plaintext.WriteTo(w.out) + break loop + } + if c1 != 0x1b { + plaintext.WriteByte(c1) + continue + } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + if c2 != 0x5b { + continue + } + + for { + c, err := er.ReadByte() + if err != nil { + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + } + } + + return len(data), nil +} diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 00000000000..65dc692b6b1 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md new file mode 100644 index 00000000000..38418353e31 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -0,0 +1,50 @@ +# go-isatty + +[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) +[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty) +[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) + +isatty for golang + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Println("Is Terminal") + } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) { + fmt.Println("Is Cygwin/MSYS2 Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} +``` + +## Installation + +``` +$ go get github.com/mattn/go-isatty +``` + +## License + +MIT + +## Author + +Yasuhiro Matsumoto (a.k.a mattn) + +## Thanks + +* k-takata: base idea for IsCygwinTerminal + + https://github.com/k-takata/go-iscygpty diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go new file mode 100644 index 00000000000..17d4f90ebcc --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements interface to isatty +package isatty diff --git a/vendor/github.com/mattn/go-isatty/go.test.sh b/vendor/github.com/mattn/go-isatty/go.test.sh new file mode 100644 index 00000000000..012162b077c --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go new file mode 100644 index 00000000000..39bbcf00f0c --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -0,0 +1,19 @@ +//go:build (darwin || freebsd || openbsd || netbsd || dragonfly) && !appengine +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
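+// Cygwin and MSYS2 ptys exist only on Windows.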
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go new file mode 100644 index 00000000000..31503226f6c --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -0,0 +1,16 @@ +//go:build appengine || js || nacl || wasm +// +build appengine js nacl wasm + +package isatty + +// IsTerminal returns true if the file descriptor is terminal which +// is always false on js and appengine classic which is a sandboxed PaaS. +func IsTerminal(fd uintptr) bool { + return false +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/vendor/github.com/mattn/go-isatty/isatty_plan9.go new file mode 100644 index 00000000000..bae7f9bb3dc --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_plan9.go @@ -0,0 +1,23 @@ +//go:build plan9 +// +build plan9 + +package isatty + +import ( + "syscall" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + path, err := syscall.Fd2path(int(fd)) + if err != nil { + return false + } + return path == "/dev/cons" || path == "/mnt/term/dev/cons" +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go new file mode 100644 index 00000000000..0c3acf2dc28 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -0,0 +1,21 @@ +//go:build solaris && !appengine +// +build solaris,!appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: https://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libc/port/gen/isatty.c +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermio(int(fd), unix.TCGETA) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go new file mode 100644 index 00000000000..67787657fb2 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -0,0 +1,19 @@ +//go:build (linux || aix || zos) && !appengine +// +build linux aix zos +// +build !appengine + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go new file mode 100644 index 00000000000..8e3c99171bf --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -0,0 +1,125 @@ +//go:build windows && !appengine +// +build windows,!appengine + +package isatty + +import ( + "errors" + "strings" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + objectNameInfo uintptr = 1 + fileNameInfo = 2 + fileTypePipe = 3 +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + ntdll = syscall.NewLazyDLL("ntdll.dll") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = kernel32.NewProc("GetFileType") + procNtQueryObject = ntdll.NewProc("NtQueryObject") +) + +func init() { + // Check if GetFileInformationByHandleEx is available. + if procGetFileInformationByHandleEx.Find() != nil { + procGetFileInformationByHandleEx = nil + } +} + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// Check pipe name is used for cygwin/msys2 pty. +// Cygwin/MSYS2 PTY has a name like: +// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master +func isCygwinPipeName(name string) bool { + token := strings.Split(name, "-") + if len(token) < 5 { + return false + } + + if token[0] != `\msys` && + token[0] != `\cygwin` && + token[0] != `\Device\NamedPipe\msys` && + token[0] != `\Device\NamedPipe\cygwin` { + return false + } + + if token[1] == "" { + return false + } + + if !strings.HasPrefix(token[2], "pty") { + return false + } + + if token[3] != `from` && token[3] != `to` { + return false + } + + if token[4] != "master" { + return false + } + + return true +} + +// getFileNameByHandle use the undocomented ntdll NtQueryObject to get file full name from file handler +// since GetFileInformationByHandleEx is not available under windows Vista and still some old fashion +// guys are using Windows XP, this is a workaround for those guys, it will also work on system from +// Windows vista to 10 +// see https://stackoverflow.com/a/18792477 for details +func getFileNameByHandle(fd uintptr) (string, error) { + if procNtQueryObject == nil { + return "", errors.New("ntdll.dll: NtQueryObject not supported") + } + + var buf [4 + syscall.MAX_PATH]uint16 + var result int + r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5, + fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0) + if r != 0 { + return "", e + } + return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. +func IsCygwinTerminal(fd uintptr) bool { + if procGetFileInformationByHandleEx == nil { + name, err := getFileNameByHandle(fd) + if err != nil { + return false + } + return isCygwinPipeName(name) + } + + // Cygwin/msys's pty is a pipe. 
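+ // so check the handle type first, then match the pipe name reported by GetFileInformationByHandleEx.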
+ ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) + if ft != fileTypePipe || e != 0 { + return false + } + + var buf [2 + syscall.MAX_PATH]uint16 + r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), + 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), + uintptr(len(buf)*2), 0, 0) + if r == 0 || e != 0 { + return false + } + + l := *(*uint32)(unsafe.Pointer(&buf)) + return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) +} diff --git a/vendor/github.com/mitchellh/go-testing-interface/.travis.yml b/vendor/github.com/mitchellh/go-testing-interface/.travis.yml new file mode 100644 index 00000000000..cca949103af --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/.travis.yml @@ -0,0 +1,12 @@ +language: go + +go: + - 1.x + - tip + +script: + - go test + +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/mitchellh/go-testing-interface/LICENSE b/vendor/github.com/mitchellh/go-testing-interface/LICENSE new file mode 100644 index 00000000000..a3866a291fd --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-testing-interface/README.md b/vendor/github.com/mitchellh/go-testing-interface/README.md new file mode 100644 index 00000000000..ee435adc54d --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/README.md @@ -0,0 +1,60 @@ +# go-testing-interface + +go-testing-interface is a Go library that exports an interface that +`*testing.T` implements as well as a runtime version you can use in its +place. + +The purpose of this library is so that you can export test helpers as a +public API without depending on the "testing" package, since you can't +create a `*testing.T` struct manually. This lets you, for example, use the +public testing APIs to generate mock data at runtime, rather than just at +test time. + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/go-testing-interface). 
+ +Given a test helper written using `go-testing-interface` like this: + + import "github.com/mitchellh/go-testing-interface" + + func TestHelper(t testing.T) { + t.Fatal("I failed") + } + +You can call the test helper in a real test easily: + + import "testing" + + func TestThing(t *testing.T) { + TestHelper(t) + } + +You can also call the test helper at runtime if needed: + + import "github.com/mitchellh/go-testing-interface" + + func main() { + TestHelper(&testing.RuntimeT{}) + } + +## Versioning + +The tagged version matches the version of Go that the interface is +compatible with. For example, the version "1.14.0" is for Go 1.14 and +introduced the `Cleanup` function. The patch version (the ".0" in the +prior example) is used to fix any bugs found in this library and has no +correlation to the supported Go version. + +## Why?! + +**Why would I call a test helper that takes a *testing.T at runtime?** + +You probably shouldn't. The only use case I've seen (and I've had) for this +is to implement a "dev mode" for a service where the test helpers are used +to populate mock data, create a mock DB, perhaps run service dependencies +in-memory, etc. + +Outside of a "dev mode", I've never seen a use case for this and I think +there shouldn't be one since the point of the `testing.T` interface is that +you can fail immediately. diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing.go b/vendor/github.com/mitchellh/go-testing-interface/testing.go new file mode 100644 index 00000000000..86510322abf --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/testing.go @@ -0,0 +1,112 @@ +package testing + +import ( + "fmt" + "log" +) + +// T is the interface that mimics the standard library *testing.T. +// +// In unit tests you can just pass a *testing.T struct. At runtime, outside +// of tests, you can pass in a RuntimeT struct from this package. +type T interface { + Cleanup(func()) + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Helper() + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Parallel() + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool +} + +// RuntimeT implements T and can be instantiated and run at runtime to +// mimic *testing.T behavior. Unlike *testing.T, this will simply panic +// for calls to Fatal. For calls to Error, you'll have to check the errors +// list to determine whether to exit yourself. +// +// Cleanup does NOT work, so if you're using a helper that uses Cleanup, +// there may be dangling resources. +// +// Parallel does not do anything. +type RuntimeT struct { + skipped bool + failed bool +} + +func (t *RuntimeT) Error(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.Fail() +} + +func (t *RuntimeT) Errorf(format string, args ...interface{}) { + log.Printf(format, args...) + t.Fail() +} + +func (t *RuntimeT) Fail() { + t.failed = true +} + +func (t *RuntimeT) FailNow() { + panic("testing.T failed, see logs for output (if any)") +} + +func (t *RuntimeT) Failed() bool { + return t.failed +} + +func (t *RuntimeT) Fatal(args ...interface{}) { + log.Print(args...) + t.FailNow() +} + +func (t *RuntimeT) Fatalf(format string, args ...interface{}) { + log.Printf(format, args...) 
+	t.FailNow()
+}
+
+func (t *RuntimeT) Log(args ...interface{}) {
+	log.Println(fmt.Sprintln(args...))
+}
+
+func (t *RuntimeT) Logf(format string, args ...interface{}) {
+	log.Println(fmt.Sprintf(format, args...))
+}
+
+func (t *RuntimeT) Name() string {
+	return ""
+}
+
+func (t *RuntimeT) Parallel() {}
+
+func (t *RuntimeT) Skip(args ...interface{}) {
+	log.Print(args...)
+	t.SkipNow()
+}
+
+func (t *RuntimeT) SkipNow() {
+	t.skipped = true
+}
+
+func (t *RuntimeT) Skipf(format string, args ...interface{}) {
+	log.Printf(format, args...)
+	t.SkipNow()
+}
+
+func (t *RuntimeT) Skipped() bool {
+	return t.skipped
+}
+
+func (t *RuntimeT) Helper() {}
+
+func (t *RuntimeT) Cleanup(func()) {}
diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
new file mode 100644
index 00000000000..c758234904e
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
@@ -0,0 +1,96 @@
+## 1.5.0
+
+* New option `IgnoreUntaggedFields` to ignore decoding to any fields
+  without `mapstructure` (or the configured tag name) set [GH-277]
+* New option `ErrorUnset` which makes it an error if any fields
+  in a target struct are not set by the decoding process. [GH-225]
+* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240]
+* Decoding to slice from array no longer crashes [GH-265]
+* Decode nested struct pointers to map [GH-271]
+* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280]
+* Fix issue where fields with `,omitempty` would sometimes decode
+  into a map with an empty string key [GH-281]
+
+## 1.4.3
+
+* Fix cases where `json.Number` didn't decode properly [GH-261]
+
+## 1.4.2
+
+* Custom name matchers to support any sort of casing, formatting, etc. for
+  field names. [GH-250]
+* Fix possible panic in ComposeDecodeHookFunc [GH-251]
+
+## 1.4.1
+
+* Fix regression where `*time.Time` value would be set to empty and not be sent
+  to decode hooks properly [GH-232]
+
+## 1.4.0
+
+* A new decode hook type `DecodeHookFuncValue` has been added that has
+  access to the full values. [GH-183]
+* Squash is now supported with embedded fields that are struct pointers [GH-205]
+* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206]
+
+## 1.3.3
+
+* Decoding maps from maps creates a settable value for decode hooks [GH-203]
+
+## 1.3.2
+
+* Decode into interface type with a struct value is supported [GH-187]
+
+## 1.3.1
+
+* Squash should only squash embedded structs. [GH-194]
+
+## 1.3.0
+
+* Added `",omitempty"` support. This will ignore zero values in the source
+  structure when encoding. [GH-145]
+
+## 1.2.3
+
+* Fix duplicate entries in Keys list with pointer values. [GH-185]
+
+## 1.2.2
+
+* Do not add unsettable (unexported) values to the unused metadata key
+  or "remain" value. [GH-150]
+
+## 1.2.1
+
+* Go modules checksum mismatch fix
+
+## 1.2.0
+
+* Added support to capture unused values in a field using the `",remain"` value
+  in the mapstructure tag. There is an example to showcase usage.
+* Added `DecoderConfig` option to always squash embedded structs
+* `json.Number` can decode into `uint` types
+* Empty slices are preserved and not replaced with nil slices
+* Fix panic that can occur when decoding a map into a nil slice of structs
+* Improved package documentation for godoc
+
+## 1.1.2
+
+* Fix error when decode hook decodes interface implementation into interface
+  type.
[GH-140] + +## 1.1.1 + +* Fix panic that can happen in `decodePtr` + +## 1.1.0 + +* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] +* Support struct to struct decoding [GH-137] +* If source map value is nil, then destination map value is nil (instead of empty) +* If source slice value is nil, then destination slice value is nil (instead of empty) +* If source pointer is nil, then destination pointer is set to nil (instead of + allocated zero value of type) + +## 1.0.0 + +* Initial tagged stable release. diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE new file mode 100644 index 00000000000..f9c841a51e0 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md new file mode 100644 index 00000000000..0018dc7d9f9 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/README.md @@ -0,0 +1,46 @@ +# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure) + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/mapstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. 
For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go new file mode 100644 index 00000000000..3a754ca7248 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -0,0 +1,279 @@ +package mapstructure + +import ( + "encoding" + "errors" + "fmt" + "net" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + var f3 DecodeHookFuncValue + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []interface{}{f1, f2, f3} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. +func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Value, to reflect.Value) (interface{}, error) { + + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from.Type(), to.Type(), from.Interface()) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), from.Interface()) + case DecodeHookFuncValue: + return f(from, to) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. +func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + var err error + data := f.Interface() + + newFrom := f + for _, f1 := range fs { + data, err = DecodeHookExec(f1, newFrom, t) + if err != nil { + return nil, err + } + newFrom = reflect.ValueOf(data) + } + + return data, nil + } +} + +// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. +// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. 
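+//
+// For illustration (a sketch by the editor, not part of the upstream docs),
+// hooks can be tried in order until one of them succeeds:
+//
+//	hook := OrComposeDecodeHookFunc(
+//		StringToTimeHookFunc(time.RFC3339),
+//		StringToTimeDurationHookFunc(),
+//	)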
+func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { + return func(a, b reflect.Value) (interface{}, error) { + var allErrs string + var out interface{} + var err error + + for _, f := range ff { + out, err = DecodeHookExec(f, a, b) + if err != nil { + allErrs += err.Error() + "\n" + continue + } + + return out, nil + } + + return nil, errors.New(allErrs) + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. +func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + if f != reflect.String || t != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. +func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +// StringToIPHookFunc returns a DecodeHookFunc that converts +// strings to net.IP +func StringToIPHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IP{}) { + return data, nil + } + + // Convert it by parsing + ip := net.ParseIP(data.(string)) + if ip == nil { + return net.IP{}, fmt.Errorf("failed parsing ip %v", data) + } + + return ip, nil + } +} + +// StringToIPNetHookFunc returns a DecodeHookFunc that converts +// strings to net.IPNet +func StringToIPNetHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IPNet{}) { + return data, nil + } + + // Convert it by parsing + _, net, err := net.ParseCIDR(data.(string)) + return net, err + } +} + +// StringToTimeHookFunc returns a DecodeHookFunc that converts +// strings to time.Time. +func StringToTimeHookFunc(layout string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Time{}) { + return data, nil + } + + // Convert it by parsing + return time.Parse(layout, data.(string)) + } +} + +// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to +// the decoder. +// +// Note that this is significantly different from the WeaklyTypedInput option +// of the DecoderConfig. 
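+//
+// As an illustrative sketch (not upstream documentation), the function can be
+// installed directly as the DecodeHook of a DecoderConfig; when the target
+// field is a string it then converts, for example, []byte("42") into "42"
+// and true into "1":
+//
+//	cfg := &DecoderConfig{DecodeHook: WeaklyTypedHook, Result: &out}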
+func WeaklyTypedHook(
+	f reflect.Kind,
+	t reflect.Kind,
+	data interface{}) (interface{}, error) {
+	dataVal := reflect.ValueOf(data)
+	switch t {
+	case reflect.String:
+		switch f {
+		case reflect.Bool:
+			if dataVal.Bool() {
+				return "1", nil
+			}
+			return "0", nil
+		case reflect.Float32:
+			return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
+		case reflect.Int:
+			return strconv.FormatInt(dataVal.Int(), 10), nil
+		case reflect.Slice:
+			dataType := dataVal.Type()
+			elemKind := dataType.Elem().Kind()
+			if elemKind == reflect.Uint8 {
+				return string(dataVal.Interface().([]uint8)), nil
+			}
+		case reflect.Uint:
+			return strconv.FormatUint(dataVal.Uint(), 10), nil
+		}
+	}
+
+	return data, nil
+}
+
+// RecursiveStructToMapHookFunc returns a DecodeHookFunc that converts
+// structs to map[string]interface{} values recursively when decoding
+// into an interface{} target.
+func RecursiveStructToMapHookFunc() DecodeHookFunc {
+	return func(f reflect.Value, t reflect.Value) (interface{}, error) {
+		if f.Kind() != reflect.Struct {
+			return f.Interface(), nil
+		}
+
+		var i interface{} = struct{}{}
+		if t.Type() != reflect.TypeOf(&i).Elem() {
+			return f.Interface(), nil
+		}
+
+		m := make(map[string]interface{})
+		t.Set(reflect.ValueOf(m))
+
+		return f.Interface(), nil
+	}
+}
+
+// TextUnmarshallerHookFunc returns a DecodeHookFunc that decodes strings
+// by calling the UnmarshalText method when the target type implements
+// the encoding.TextUnmarshaler interface.
+func TextUnmarshallerHookFunc() DecodeHookFuncType {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		if f.Kind() != reflect.String {
+			return data, nil
+		}
+		result := reflect.New(t).Interface()
+		unmarshaller, ok := result.(encoding.TextUnmarshaler)
+		if !ok {
+			return data, nil
+		}
+		if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil {
+			return nil, err
+		}
+		return result, nil
+	}
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go
new file mode 100644
index 00000000000..47a99e5af3f
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/error.go
@@ -0,0 +1,50 @@
+package mapstructure
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Error implements the error interface and can represent multiple
+// errors that occur in the course of a single decode.
+type Error struct {
+	Errors []string
+}
+
+func (e *Error) Error() string {
+	points := make([]string, len(e.Errors))
+	for i, err := range e.Errors {
+		points[i] = fmt.Sprintf("* %s", err)
+	}
+
+	sort.Strings(points)
+	return fmt.Sprintf(
+		"%d error(s) decoding:\n\n%s",
+		len(e.Errors), strings.Join(points, "\n"))
+}
+
+// WrappedErrors implements the errwrap.Wrapper interface to make this
+// return value more useful with the errwrap and go-multierror libraries.
+func (e *Error) WrappedErrors() []error {
+	if e == nil {
+		return nil
+	}
+
+	result := make([]error, len(e.Errors))
+	for i, e := range e.Errors {
+		result[i] = errors.New(e)
+	}
+
+	return result
+}
+
+func appendErrors(errors []string, err error) []string {
+	switch e := err.(type) {
+	case *Error:
+		return append(errors, e.Errors...)
+ default: + return append(errors, e.Error()) + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go new file mode 100644 index 00000000000..1efb22ac361 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -0,0 +1,1540 @@ +// Package mapstructure exposes functionality to convert one arbitrary +// Go type into another, typically to convert a map[string]interface{} +// into a native Go structure. +// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +// +// The simplest function to start with is Decode. +// +// Field Tags +// +// When decoding to a struct, mapstructure will use the field name by +// default to perform the mapping. For example, if a struct has a field +// "Username" then mapstructure will look for a key in the source value +// of "username" (case insensitive). +// +// type User struct { +// Username string +// } +// +// You can change the behavior of mapstructure by using struct tags. +// The default struct tag that mapstructure looks for is "mapstructure" +// but you can customize it using DecoderConfig. +// +// Renaming Fields +// +// To rename the key that mapstructure looks for, use the "mapstructure" +// tag and set a value directly. For example, to change the "username" example +// above to "user": +// +// type User struct { +// Username string `mapstructure:"user"` +// } +// +// Embedded Structs and Squashing +// +// Embedded structs are treated as if they're another field with that name. +// By default, the two structs below are equivalent when decoding with +// mapstructure: +// +// type Person struct { +// Name string +// } +// +// type Friend struct { +// Person +// } +// +// type Friend struct { +// Person Person +// } +// +// This would require an input that looks like below: +// +// map[string]interface{}{ +// "person": map[string]interface{}{"name": "alice"}, +// } +// +// If your "person" value is NOT nested, then you can append ",squash" to +// your tag value and mapstructure will treat it as if the embedded struct +// were part of the struct directly. Example: +// +// type Friend struct { +// Person `mapstructure:",squash"` +// } +// +// Now the following input would be accepted: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// When decoding from a struct to a map, the squash tag squashes the struct +// fields into a single map. Using the example structs from above: +// +// Friend{Person: Person{Name: "alice"}} +// +// Will be decoded into a map: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// DecoderConfig has a field that changes the behavior of mapstructure +// to always squash embedded structs. +// +// Remainder Values +// +// If there are any unmapped keys in the source value, mapstructure by +// default will silently ignore them. You can error by setting ErrorUnused +// in DecoderConfig. If you're using Metadata you can also maintain a slice +// of the unused keys. +// +// You can also use the ",remain" suffix on your tag to collect all unused +// values in a map. The field with this tag MUST be a map type and should +// probably be a "map[string]interface{}" or "map[interface{}]interface{}". 
+// See example below:
+//
+//     type Friend struct {
+//         Name  string
+//         Other map[string]interface{} `mapstructure:",remain"`
+//     }
+//
+// Given the input below, Other would be populated with the other
+// values that weren't used (everything but "name"):
+//
+//     map[string]interface{}{
+//         "name":    "bob",
+//         "address": "123 Maple St.",
+//     }
+//
+// Omit Empty Values
+//
+// When decoding from a struct to any other value, you may use the
+// ",omitempty" suffix on your tag to omit that value if it equates to
+// the zero value. The zero value of all types is specified in the Go
+// specification.
+//
+// For example, the zero value of a numeric type is zero ("0"). If the struct
+// field value is zero and a numeric type, the field is empty, and it won't
+// be encoded into the destination type.
+//
+//     type Source struct {
+//         Age int `mapstructure:",omitempty"`
+//     }
+//
+// Unexported fields
+//
+// Since unexported (private) struct fields cannot be set outside the package
+// where they are defined, the decoder will simply skip them.
+//
+// For this output type definition:
+//
+//     type Exported struct {
+//         private string // this unexported field will be skipped
+//         Public string
+//     }
+//
+// Using this map as input:
+//
+//     map[string]interface{}{
+//         "private": "I will be ignored",
+//         "Public":  "I made it through!",
+//     }
+//
+// The following struct will be decoded:
+//
+//     type Exported struct {
+//         private: "" // field is left with an empty string (zero value)
+//         Public: "I made it through!"
+//     }
+//
+// Other Configuration
+//
+// mapstructure is highly configurable. See the DecoderConfig struct
+// for other features and options that are supported.
+package mapstructure
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// DecodeHookFunc is the callback function that can be used for
+// data transformations. See "DecodeHook" in the DecoderConfig
+// struct.
+//
+// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or
+// DecodeHookFuncValue.
+// Values are a superset of Types (Values can return types), and Types are a
+// superset of Kinds (Types can return Kinds) and are generally a richer thing
+// to use, but Kinds are simpler if you only need those.
+//
+// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
+// we started with Kinds and then realized Types were the better solution,
+// but have a promise to not break backwards compat so we now support
+// both.
+type DecodeHookFunc interface{}
+
+// DecodeHookFuncType is a DecodeHookFunc which has complete information about
+// the source and target types.
+type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
+
+// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
+// source and target types.
+type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
+
+// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target
+// values.
+type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error)
+
+// DecoderConfig is the configuration that is used to create a new decoder
+// and allows customization of various aspects of decoding.
+type DecoderConfig struct {
+	// DecodeHook, if set, will be called before any decoding and any
+	// type conversion (if WeaklyTypedInput is on). This lets you modify
+	// the values before they're set down onto the resulting struct.
The + // DecodeHook is called for every map and value in the input. This means + // that if a struct has embedded fields with squash tags the decode hook + // is called only once with all of the input data, not once for each + // embedded struct. + // + // If an error is returned, the entire decode will fail with that error. + DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). + ErrorUnused bool + + // If ErrorUnset is true, then it is an error for there to exist + // fields in the result that were not set in the decoding process + // (extra fields). This only applies to decoding to a struct. This + // will affect all nested structs as well. + ErrorUnset bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. + ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // - single values are converted to slices if required. Each + // element is weakly decoded. For example: "4" can become []int{4} + // if the target type is an int slice. + // + WeaklyTypedInput bool + + // Squash will squash embedded structs. A squash tag may also be + // added to an individual struct field using a tag. For example: + // + // type Parent struct { + // Child `mapstructure:",squash"` + // } + Squash bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. + Result interface{} + + // The tag name that mapstructure reads for field names. This + // defaults to "mapstructure" + TagName string + + // IgnoreUntaggedFields ignores all struct fields without explicit + // TagName, comparable to `mapstructure:"-"` as default behaviour. + IgnoreUntaggedFields bool + + // MatchName is the function used to match the map key to the struct + // field name or tag. Defaults to `strings.EqualFold`. This can be used + // to implement case-sensitive tag values, support snake casing, etc. + MatchName func(mapKey, fieldName string) bool +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. 
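+//
+// For illustration (a sketch, not upstream documentation; the input and
+// result names here are hypothetical), metadata is collected by passing a
+// *Metadata through DecoderConfig, or via the DecodeMetadata shorthand
+// defined below:
+//
+//	var md Metadata
+//	if err := DecodeMetadata(input, &result, &md); err == nil {
+//		// md.Keys, md.Unused and md.Unset describe the decode
+//	}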
+type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string + + // Unset is a slice of field names that were found in the result interface + // but weren't set in the decoding process since there was no matching value + // in the input + Unset []string +} + +// Decode takes an input structure and uses reflection to translate it to +// the output structure. output must be a pointer to a map or struct. +func Decode(input interface{}, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// DecodeMetadata is the same as Decode, but is shorthand to +// enable metadata collection. See DecoderConfig for more info. +func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecodeMetadata is the same as Decode, but is shorthand to +// enable both WeaklyTypedInput and metadata collection. See +// DecoderConfig for more info. +func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. +func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + + if config.Metadata.Unset == nil { + config.Metadata.Unset = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + if config.MatchName == nil { + config.MatchName = strings.EqualFold + } + + result := &Decoder{ + config: config, + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(input interface{}) error { + return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) +} + +// Decodes an unknown data type into a specific reflection value. 
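+// The kind of outVal (normalized by getKind at the bottom of this file, so
+// e.g. int8 through int64 all count as Int) selects which of the
+// kind-specific decode* helpers below is used.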
+func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error {
+	var inputVal reflect.Value
+	if input != nil {
+		inputVal = reflect.ValueOf(input)
+
+		// We need to check here if input is a typed nil. Typed nils won't
+		// match the "input == nil" below so we check that here.
+		if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() {
+			input = nil
+		}
+	}
+
+	if input == nil {
+		// If the data is nil, then we don't set anything, unless ZeroFields is set
+		// to true.
+		if d.config.ZeroFields {
+			outVal.Set(reflect.Zero(outVal.Type()))
+
+			if d.config.Metadata != nil && name != "" {
+				d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+			}
+		}
+		return nil
+	}
+
+	if !inputVal.IsValid() {
+		// If the input value is invalid, then we just set the value
+		// to be the zero value.
+		outVal.Set(reflect.Zero(outVal.Type()))
+		if d.config.Metadata != nil && name != "" {
+			d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+		}
+		return nil
+	}
+
+	if d.config.DecodeHook != nil {
+		// We have a DecodeHook, so let's pre-process the input.
+		var err error
+		input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal)
+		if err != nil {
+			return fmt.Errorf("error decoding '%s': %s", name, err)
+		}
+	}
+
+	var err error
+	outputKind := getKind(outVal)
+	addMetaKey := true
+	switch outputKind {
+	case reflect.Bool:
+		err = d.decodeBool(name, input, outVal)
+	case reflect.Interface:
+		err = d.decodeBasic(name, input, outVal)
+	case reflect.String:
+		err = d.decodeString(name, input, outVal)
+	case reflect.Int:
+		err = d.decodeInt(name, input, outVal)
+	case reflect.Uint:
+		err = d.decodeUint(name, input, outVal)
+	case reflect.Float32:
+		err = d.decodeFloat(name, input, outVal)
+	case reflect.Struct:
+		err = d.decodeStruct(name, input, outVal)
+	case reflect.Map:
+		err = d.decodeMap(name, input, outVal)
+	case reflect.Ptr:
+		addMetaKey, err = d.decodePtr(name, input, outVal)
+	case reflect.Slice:
+		err = d.decodeSlice(name, input, outVal)
+	case reflect.Array:
+		err = d.decodeArray(name, input, outVal)
+	case reflect.Func:
+		err = d.decodeFunc(name, input, outVal)
+	default:
+		// If we reached this point then we weren't able to decode it
+		return fmt.Errorf("%s: unsupported type: %s", name, outputKind)
+	}
+
+	// If we reached here, then we successfully decoded SOMETHING, so
+	// mark the key as used if we're tracking metadata.
+	if addMetaKey && d.config.Metadata != nil && name != "" {
+		d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+	}
+
+	return err
+}
+
+// This decodes a basic type (bool, int, string, etc.) and sets the
+// value to "data" of that type.
+func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
+	if val.IsValid() && val.Elem().IsValid() {
+		elem := val.Elem()
+
+		// If we can't address this element, then it's not writable. Instead,
+		// we make a copy of the value (which is a pointer and therefore
+		// writable), decode into that, and replace the whole value.
+		copied := false
+		if !elem.CanAddr() {
+			copied = true
+
+			// Make *T
+			copy := reflect.New(elem.Type())
+
+			// *T = elem
+			copy.Elem().Set(elem)
+
+			// Set elem so we decode into it
+			elem = copy
+		}
+
+		// Decode. If we have an error then return. We also return right
+		// away if we're not a copy because that means we decoded directly.
+		if err := d.decode(name, data, elem); err != nil || !copied {
+			return err
+		}
+
+		// If we're a copy, we need to set the final result
+		val.Set(elem.Elem())
+		return nil
+	}
+
+	dataVal := reflect.ValueOf(data)
+
+	// If the input data is a pointer, and the assigned type is the dereference
+	// of that exact pointer, then indirect it so that we can assign it.
+	// Example: *string to string
+	if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() {
+		dataVal = reflect.Indirect(dataVal)
+	}
+
+	if !dataVal.IsValid() {
+		dataVal = reflect.Zero(val.Type())
+	}
+
+	dataValType := dataVal.Type()
+	if !dataValType.AssignableTo(val.Type()) {
+		return fmt.Errorf(
+			"'%s' expected type '%s', got '%s'",
+			name, val.Type(), dataValType)
+	}
+
+	val.Set(dataVal)
+	return nil
+}
+
+func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	dataKind := getKind(dataVal)
+
+	converted := true
+	switch {
+	case dataKind == reflect.String:
+		val.SetString(dataVal.String())
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetString("1")
+		} else {
+			val.SetString("0")
+		}
+	case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatInt(dataVal.Int(), 10))
+	case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
+	case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
+	case dataKind == reflect.Slice && d.config.WeaklyTypedInput,
+		dataKind == reflect.Array && d.config.WeaklyTypedInput:
+		dataType := dataVal.Type()
+		elemKind := dataType.Elem().Kind()
+		switch elemKind {
+		case reflect.Uint8:
+			var uints []uint8
+			if dataKind == reflect.Array {
+				uints = make([]uint8, dataVal.Len(), dataVal.Len())
+				for i := range uints {
+					uints[i] = dataVal.Index(i).Interface().(uint8)
+				}
+			} else {
+				uints = dataVal.Interface().([]uint8)
+			}
+			val.SetString(string(uints))
+		default:
+			converted = false
+		}
+	default:
+		converted = false
+	}
+
+	if !converted {
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+			name, val.Type(), dataVal.Type(), data)
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	dataKind := getKind(dataVal)
+	dataType := dataVal.Type()
+
+	switch {
+	case dataKind == reflect.Int:
+		val.SetInt(dataVal.Int())
+	case dataKind == reflect.Uint:
+		val.SetInt(int64(dataVal.Uint()))
+	case dataKind == reflect.Float32:
+		val.SetInt(int64(dataVal.Float()))
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetInt(1)
+		} else {
+			val.SetInt(0)
+		}
+	case dataKind == reflect.String && d.config.WeaklyTypedInput:
+		str := dataVal.String()
+		if str == "" {
+			str = "0"
+		}
+
+		i, err := strconv.ParseInt(str, 0, val.Type().Bits())
+		if err == nil {
+			val.SetInt(i)
+		} else {
+			return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
+		}
+	case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+		jn := data.(json.Number)
+		i, err := jn.Int64()
+		if err != nil {
+			return fmt.Errorf(
+				"error decoding json.Number into %s: %s", name, err)
+		}
+		val.SetInt(i)
+	default:
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+			name, val.Type(), dataVal.Type(), data)
+ } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseUint(str, 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := strconv.ParseUint(string(jn), 0, 64) + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetUint(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(dataVal.Float()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + f, err := strconv.ParseFloat(str, val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err 
!= nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + // Check input type and based on the input type jump to the proper func + dataVal := reflect.Indirect(reflect.ValueOf(data)) + switch dataVal.Kind() { + case reflect.Map: + return d.decodeMapFromMap(name, dataVal, val, valMap) + + case reflect.Struct: + return d.decodeMapFromStruct(name, dataVal, val, valMap) + + case reflect.Array, reflect.Slice: + if d.config.WeaklyTypedInput { + return d.decodeMapFromSlice(name, dataVal, val, valMap) + } + + fallthrough + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + name+"["+strconv.Itoa(i)+"]", + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // Accumulate errors + errors := make([]string, 0) + + // If the input data is empty, then we just match what the input data is. + if dataVal.Len() == 0 { + if dataVal.IsNil() { + if !val.IsNil() { + val.Set(dataVal) + } + } else { + // Set to empty allocated value + val.Set(valMap) + } + + return nil + } + + for _, k := range dataVal.MapKeys() { + fieldName := name + "[" + k.String() + "]" + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errors = appendErrors(errors, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errors = appendErrors(errors, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + // If we had errors, return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + typ := dataVal.Type() + for i := 0; i < typ.NumField(); i++ { + // Get the StructField first since this is a cheap operation. If the + // field is unexported, then ignore it. + f := typ.Field(i) + if f.PkgPath != "" { + continue + } + + // Next get the actual value of this field and verify it is assignable + // to the map value. 
+		v := dataVal.Field(i)
+		if !v.Type().AssignableTo(valMap.Type().Elem()) {
+			return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem())
+		}
+
+		tagValue := f.Tag.Get(d.config.TagName)
+		keyName := f.Name
+
+		if tagValue == "" && d.config.IgnoreUntaggedFields {
+			continue
+		}
+
+		// If Squash is set in the config, we squash the field down.
+		squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous
+
+		v = dereferencePtrToStructIfNeeded(v, d.config.TagName)
+
+		// Determine the name of the key in the map
+		if index := strings.Index(tagValue, ","); index != -1 {
+			if tagValue[:index] == "-" {
+				continue
+			}
+			// If "omitempty" is specified in the tag, it ignores empty values.
+			if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) {
+				continue
+			}
+
+			// If "squash" is specified in the tag, we squash the field down.
+			squash = squash || strings.Index(tagValue[index+1:], "squash") != -1
+			if squash {
+				// When squashing, the embedded type can be a pointer to a struct.
+				if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct {
+					v = v.Elem()
+				}
+
+				// The final type must be a struct
+				if v.Kind() != reflect.Struct {
+					return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
+				}
+			}
+			if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" {
+				keyName = keyNameTagValue
+			}
+		} else if len(tagValue) > 0 {
+			if tagValue == "-" {
+				continue
+			}
+			keyName = tagValue
+		}
+
+		switch v.Kind() {
+		// this is an embedded struct, so handle it differently
+		case reflect.Struct:
+			x := reflect.New(v.Type())
+			x.Elem().Set(v)
+
+			vType := valMap.Type()
+			vKeyType := vType.Key()
+			vElemType := vType.Elem()
+			mType := reflect.MapOf(vKeyType, vElemType)
+			vMap := reflect.MakeMap(mType)
+
+			// Creating a pointer to a map so that other methods can completely
+			// overwrite the map if need be (looking at you decodeMapFromMap). The
+			// indirection allows the underlying map to be settable (CanSet() == true)
+			// whereas reflect.MakeMap returns an unsettable map.
+			addrVal := reflect.New(vMap.Type())
+			reflect.Indirect(addrVal).Set(vMap)
+
+			err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal))
+			if err != nil {
+				return err
+			}
+
+			// the underlying map may have been completely overwritten so pull
+			// it indirectly out of the enclosing value.
+			vMap = reflect.Indirect(addrVal)
+
+			if squash {
+				for _, k := range vMap.MapKeys() {
+					valMap.SetMapIndex(k, vMap.MapIndex(k))
+				}
+			} else {
+				valMap.SetMapIndex(reflect.ValueOf(keyName), vMap)
+			}
+
+		default:
+			valMap.SetMapIndex(reflect.ValueOf(keyName), v)
+		}
+	}
+
+	if val.CanAddr() {
+		val.Set(valMap)
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) {
+	// If the input data is nil, then we want to just set the output
+	// pointer to be nil as well.
+	isNil := data == nil
+	if !isNil {
+		switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() {
+		case reflect.Chan,
+			reflect.Func,
+			reflect.Interface,
+			reflect.Map,
+			reflect.Ptr,
+			reflect.Slice:
+			isNil = v.IsNil()
+		}
+	}
+	if isNil {
+		if !val.IsNil() && val.CanSet() {
+			nilValue := reflect.New(val.Type()).Elem()
+			val.Set(nilValue)
+		}
+
+		return true, nil
+	}
+
+	// Create an element of the concrete (non pointer) type and decode
+	// into that. Then set the value of the pointer to this type.
+	valType := val.Type()
+	valElemType := valType.Elem()
+	if val.CanSet() {
+		realVal := val
+		if realVal.IsNil() || d.config.ZeroFields {
+			realVal = reflect.New(valElemType)
+		}
+
+		if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
+			return false, err
+		}
+
+		val.Set(realVal)
+	} else {
+		if err := d.decode(name, data, reflect.Indirect(val)); err != nil {
+			return false, err
+		}
+	}
+	return false, nil
+}
+
+func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
+	// Function values cannot be converted by reflection, so we only
+	// assign the input when its type matches the target type exactly.
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	if val.Type() != dataVal.Type() {
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+			name, val.Type(), dataVal.Type(), data)
+	}
+	val.Set(dataVal)
+	return nil
+}
+
+func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	dataValKind := dataVal.Kind()
+	valType := val.Type()
+	valElemType := valType.Elem()
+	sliceType := reflect.SliceOf(valElemType)
+
+	// If we have a non array/slice type then we first attempt to convert.
+	if dataValKind != reflect.Array && dataValKind != reflect.Slice {
+		if d.config.WeaklyTypedInput {
+			switch {
+			// Slice and array we use the normal logic
+			case dataValKind == reflect.Slice, dataValKind == reflect.Array:
+				break
+
+			// Empty maps turn into empty slices
+			case dataValKind == reflect.Map:
+				if dataVal.Len() == 0 {
+					val.Set(reflect.MakeSlice(sliceType, 0, 0))
+					return nil
+				}
+				// Create slice of maps of other sizes
+				return d.decodeSlice(name, []interface{}{data}, val)
+
+			case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8:
+				return d.decodeSlice(name, []byte(dataVal.String()), val)
+
+			// All other types we try to convert to the slice type
+			// and "lift" it into it. i.e. a string becomes a string slice.
+			default:
+				// Just re-try this function with data as a slice.
+				return d.decodeSlice(name, []interface{}{data}, val)
+			}
+		}
+
+		return fmt.Errorf(
+			"'%s': source data must be an array or slice, got %s", name, dataValKind)
+	}
+
+	// If the input value is nil, then don't allocate since empty != nil
+	if dataValKind != reflect.Array && dataVal.IsNil() {
+		return nil
+	}
+
+	valSlice := val
+	if valSlice.IsNil() || d.config.ZeroFields {
+		// Make a new slice to hold our result, same size as the original data.
+ valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + for valSlice.Len() <= i { + valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) + } + currentField := valSlice.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + arrayType := reflect.ArrayOf(valType.Len(), valElemType) + + valArray := val + + if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty arrays + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.Zero(arrayType)) + return nil + } + + // All other types we try to convert to the array type + // and "lift" it into it. i.e. a string becomes a string array. + default: + // Just re-try this function with data as a slice. + return d.decodeArray(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + if dataVal.Len() > arrayType.Len() { + return fmt.Errorf( + "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) + + } + + // Make a new array to hold our result, same size as the original data. + valArray = reflect.New(arrayType).Elem() + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valArray.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the array we built up + val.Set(valArray) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. + if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + switch dataValKind { + case reflect.Map: + return d.decodeStructFromMap(name, dataVal, val) + + case reflect.Struct: + // Not the most efficient way to do this but we can optimize later if + // we want to. To convert from struct to struct we go to map first + // as an intermediary. 
+
+		// Make a new map to hold our result
+		mapType := reflect.TypeOf((map[string]interface{})(nil))
+		mval := reflect.MakeMap(mapType)
+
+		// Creating a pointer to a map so that other methods can completely
+		// overwrite the map if need be (looking at you decodeMapFromMap). The
+		// indirection allows the underlying map to be settable (CanSet() == true)
+		// whereas reflect.MakeMap returns an unsettable map.
+		addrVal := reflect.New(mval.Type())
+
+		reflect.Indirect(addrVal).Set(mval)
+		if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil {
+			return err
+		}
+
+		result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val)
+		return result
+
+	default:
+		return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
+	}
+}
+
+func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error {
+	dataValType := dataVal.Type()
+	if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
+		return fmt.Errorf(
+			"'%s' needs a map with string keys, has '%s' keys",
+			name, dataValType.Key().Kind())
+	}
+
+	dataValKeys := make(map[reflect.Value]struct{})
+	dataValKeysUnused := make(map[interface{}]struct{})
+	for _, dataValKey := range dataVal.MapKeys() {
+		dataValKeys[dataValKey] = struct{}{}
+		dataValKeysUnused[dataValKey.Interface()] = struct{}{}
+	}
+
+	targetValKeysUnused := make(map[interface{}]struct{})
+	errors := make([]string, 0)
+
+	// This slice will keep track of all the structs we'll be decoding.
+	// There can be more than one struct if there are embedded structs
+	// that are squashed.
+	structs := make([]reflect.Value, 1, 5)
+	structs[0] = val
+
+	// Compile the list of all the fields that we're going to be decoding
+	// from all the structs.
+	type field struct {
+		field reflect.StructField
+		val   reflect.Value
+	}
+
+	// remainField is set to a valid field set with the "remain" tag if
+	// we are keeping track of remaining values.
+	var remainField *field
+
+	fields := []field{}
+	for len(structs) > 0 {
+		structVal := structs[0]
+		structs = structs[1:]
+
+		structType := structVal.Type()
+
+		for i := 0; i < structType.NumField(); i++ {
+			fieldType := structType.Field(i)
+			fieldVal := structVal.Field(i)
+			if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct {
+				// Handle embedded struct pointers as embedded structs.
+				fieldVal = fieldVal.Elem()
+			}
+
+			// If "squash" is specified in the tag, we squash the field down.
+			squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous
+			remain := false
+
+			// We always parse the tags because we're looking for other tags too
+			tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
+			for _, tag := range tagParts[1:] {
+				if tag == "squash" {
+					squash = true
+					break
+				}
+
+				if tag == "remain" {
+					remain = true
+					break
+				}
+			}
+
+			if squash {
+				if fieldVal.Kind() != reflect.Struct {
+					errors = appendErrors(errors,
+						fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind()))
+				} else {
+					structs = append(structs, fieldVal)
+				}
+				continue
+			}
+
+			// Build our field
+			if remain {
+				remainField = &field{fieldType, fieldVal}
+			} else {
+				// Normal struct field, store it away
+				fields = append(fields, field{fieldType, fieldVal})
+			}
+		}
+	}
+
+	for _, f := range fields {
+		field, fieldValue := f.field, f.val
+		fieldName := field.Name
+
+		tagValue := field.Tag.Get(d.config.TagName)
+		tagValue = strings.SplitN(tagValue, ",", 2)[0]
+		if tagValue != "" {
+			fieldName = tagValue
+		}
+
+		rawMapKey := reflect.ValueOf(fieldName)
+		rawMapVal := dataVal.MapIndex(rawMapKey)
+		if !rawMapVal.IsValid() {
+			// Do a slower search by iterating over each key and
+			// doing case-insensitive search.
+			for dataValKey := range dataValKeys {
+				mK, ok := dataValKey.Interface().(string)
+				if !ok {
+					// Not a string key
+					continue
+				}
+
+				if d.config.MatchName(mK, fieldName) {
+					rawMapKey = dataValKey
+					rawMapVal = dataVal.MapIndex(dataValKey)
+					break
+				}
+			}
+
+			if !rawMapVal.IsValid() {
+				// There was no matching key in the map for the value in
+				// the struct. Remember it for potential errors and metadata.
+				targetValKeysUnused[fieldName] = struct{}{}
+				continue
+			}
+		}
+
+		if !fieldValue.IsValid() {
+			// This should never happen
+			panic("field is not valid")
+		}
+
+		// If we can't set the field, then it is unexported or something,
+		// and we just continue onwards.
+		if !fieldValue.CanSet() {
+			continue
+		}
+
+		// Delete the key we're using from the unused map so we stop tracking
+		delete(dataValKeysUnused, rawMapKey.Interface())
+
+		// If the name is empty string, then we're at the root, and we
+		// don't dot-join the fields.
+		if name != "" {
+			fieldName = name + "." + fieldName
+		}
+
+		if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil {
+			errors = appendErrors(errors, err)
+		}
+	}
+
+	// If we have a "remain"-tagged field and we have unused keys then
+	// we put the unused keys directly into the remain field.
+	if remainField != nil && len(dataValKeysUnused) > 0 {
+		// Build a map of only the unused values
+		remain := map[interface{}]interface{}{}
+		for key := range dataValKeysUnused {
+			remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface()
+		}
+
+		// Decode it as if we were just decoding this map onto our map.
+ if err := d.decodeMap(name, remain, remainField.val); err != nil { + errors = appendErrors(errors, err) + } + + // Set the map to nil so we have none so that the next check will + // not error (ErrorUnused) + dataValKeysUnused = nil + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { + keys := make([]string, 0, len(targetValKeysUnused)) + for rawKey := range targetValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if len(errors) > 0 { + return &Error{errors} + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + for rawKey := range targetValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unset = append(d.config.Metadata.Unset, key) + } + } + + return nil +} + +func isEmptyValue(v reflect.Value) bool { + switch getKind(v) { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +} + +func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool { + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields + return true + } + if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside + return true + } + } + return false +} + +func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return v + } + deref := v.Elem() + derefT := deref.Type() + if isStructTypeConvertibleToMap(derefT, true, tagName) { + return deref + } + return v +} diff --git a/vendor/github.com/oklog/run/.gitignore b/vendor/github.com/oklog/run/.gitignore new file mode 100644 index 00000000000..a1338d68517 --- /dev/null +++ b/vendor/github.com/oklog/run/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out 
+ +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ diff --git a/vendor/github.com/oklog/run/LICENSE b/vendor/github.com/oklog/run/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/oklog/run/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/oklog/run/README.md b/vendor/github.com/oklog/run/README.md new file mode 100644 index 00000000000..eba7d11cf3a --- /dev/null +++ b/vendor/github.com/oklog/run/README.md @@ -0,0 +1,75 @@ +# run + +[![GoDoc](https://godoc.org/github.com/oklog/run?status.svg)](https://godoc.org/github.com/oklog/run) +[![Build Status](https://img.shields.io/endpoint.svg?url=https%3A%2F%2Factions-badge.atrox.dev%2Foklog%2Frun%2Fbadge&style=flat-square&label=build)](https://github.com/oklog/run/actions?query=workflow%3ATest) +[![Go Report Card](https://goreportcard.com/badge/github.com/oklog/run)](https://goreportcard.com/report/github.com/oklog/run) +[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/run/master/LICENSE) + +run.Group is a universal mechanism to manage goroutine lifecycles. + +Create a zero-value run.Group, and then add actors to it. Actors are defined as +a pair of functions: an **execute** function, which should run synchronously; +and an **interrupt** function, which, when invoked, should cause the execute +function to return. Finally, invoke Run, which concurrently runs all of the +actors, waits until the first actor exits, invokes the interrupt functions, and +finally returns control to the caller only once all actors have returned. This +general-purpose API allows callers to model pretty much any runnable task, and +achieve well-defined lifecycle semantics for the group. + +run.Group was written to manage component lifecycles in func main for +[OK Log](https://github.com/oklog/oklog). +But it's useful in any circumstance where you need to orchestrate multiple +goroutines as a unit whole. +[Click here](https://www.youtube.com/watch?v=LHe1Cb_Ud_M&t=15m45s) to see a +video of a talk where run.Group is described. + +## Examples + +### context.Context + +```go +ctx, cancel := context.WithCancel(context.Background()) +g.Add(func() error { + return myProcess(ctx, ...) +}, func(error) { + cancel() +}) +``` + +### net.Listener + +```go +ln, _ := net.Listen("tcp", ":8080") +g.Add(func() error { + return http.Serve(ln, nil) +}, func(error) { + ln.Close() +}) +``` + +### io.ReadCloser + +```go +var conn io.ReadCloser = ... +g.Add(func() error { + s := bufio.NewScanner(conn) + for s.Scan() { + println(s.Text()) + } + return s.Err() +}, func(error) { + conn.Close() +}) +``` + +## Comparisons + +Package run is somewhat similar to package +[errgroup](https://godoc.org/golang.org/x/sync/errgroup), +except it doesn't require actor goroutines to understand context semantics. 
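+
+For illustration, a complete program combining the net.Listener actor above
+with the package's SignalHandler helper might look like the following sketch
+(the address and the nil default handler are arbitrary choices):
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"os"
+	"syscall"
+
+	"github.com/oklog/run"
+)
+
+func main() {
+	var g run.Group
+
+	// HTTP server actor: interrupted by closing its listener.
+	ln, _ := net.Listen("tcp", ":8080")
+	g.Add(func() error {
+		return http.Serve(ln, nil)
+	}, func(error) {
+		ln.Close()
+	})
+
+	// Signal actor: returns a SignalError on SIGINT or SIGTERM.
+	g.Add(run.SignalHandler(context.Background(), syscall.SIGINT, syscall.SIGTERM))
+
+	// Run blocks until the first actor returns, interrupts the others,
+	// and returns the first error once every actor has exited.
+	fmt.Fprintln(os.Stderr, "exit:", g.Run())
+}
+```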
+ +It's somewhat similar to package +[tomb.v1](https://godoc.org/gopkg.in/tomb.v1) or +[tomb.v2](https://godoc.org/gopkg.in/tomb.v2), +except it has a much smaller API surface, delegating e.g. staged shutdown of +goroutines to the caller. diff --git a/vendor/github.com/oklog/run/actors.go b/vendor/github.com/oklog/run/actors.go new file mode 100644 index 00000000000..ef93495d3f0 --- /dev/null +++ b/vendor/github.com/oklog/run/actors.go @@ -0,0 +1,38 @@ +package run + +import ( + "context" + "fmt" + "os" + "os/signal" +) + +// SignalHandler returns an actor, i.e. an execute and interrupt func, that +// terminates with SignalError when the process receives one of the provided +// signals, or the parent context is canceled. +func SignalHandler(ctx context.Context, signals ...os.Signal) (execute func() error, interrupt func(error)) { + ctx, cancel := context.WithCancel(ctx) + return func() error { + c := make(chan os.Signal, 1) + signal.Notify(c, signals...) + select { + case sig := <-c: + return SignalError{Signal: sig} + case <-ctx.Done(): + return ctx.Err() + } + }, func(error) { + cancel() + } +} + +// SignalError is returned by the signal handler's execute function +// when it terminates due to a received signal. +type SignalError struct { + Signal os.Signal +} + +// Error implements the error interface. +func (e SignalError) Error() string { + return fmt.Sprintf("received signal %s", e.Signal) +} diff --git a/vendor/github.com/oklog/run/group.go b/vendor/github.com/oklog/run/group.go new file mode 100644 index 00000000000..832d47dd169 --- /dev/null +++ b/vendor/github.com/oklog/run/group.go @@ -0,0 +1,62 @@ +// Package run implements an actor-runner with deterministic teardown. It is +// somewhat similar to package errgroup, except it does not require actor +// goroutines to understand context semantics. This makes it suitable for use in +// more circumstances; for example, goroutines which are handling connections +// from net.Listeners, or scanning input from a closable io.Reader. +package run + +// Group collects actors (functions) and runs them concurrently. +// When one actor (function) returns, all actors are interrupted. +// The zero value of a Group is useful. +type Group struct { + actors []actor +} + +// Add an actor (function) to the group. Each actor must be pre-emptable by an +// interrupt function. That is, if interrupt is invoked, execute should return. +// Also, it must be safe to call interrupt even after execute has returned. +// +// The first actor (function) to return interrupts all running actors. +// The error is passed to the interrupt functions, and is returned by Run. +func (g *Group) Add(execute func() error, interrupt func(error)) { + g.actors = append(g.actors, actor{execute, interrupt}) +} + +// Run all actors (functions) concurrently. +// When the first actor returns, all others are interrupted. +// Run only returns when all actors have exited. +// Run returns the error returned by the first exiting actor. +func (g *Group) Run() error { + if len(g.actors) == 0 { + return nil + } + + // Run each actor. + errors := make(chan error, len(g.actors)) + for _, a := range g.actors { + go func(a actor) { + errors <- a.execute() + }(a) + } + + // Wait for the first actor to stop. + err := <-errors + + // Signal all actors to stop. + for _, a := range g.actors { + a.interrupt(err) + } + + // Wait for all actors to stop. + for i := 1; i < cap(errors); i++ { + <-errors + } + + // Return the original error. 
+ return err +} + +type actor struct { + execute func() error + interrupt func(error) +} diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore new file mode 100644 index 00000000000..5e987350471 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/.gitignore @@ -0,0 +1,34 @@ +# Created by https://www.gitignore.io/api/macos + +### macOS ### +*.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +# End of https://www.gitignore.io/api/macos + +cmd/*/*exe +.idea \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml new file mode 100644 index 00000000000..fd6c6db713d --- /dev/null +++ b/vendor/github.com/pierrec/lz4/.travis.yml @@ -0,0 +1,24 @@ +language: go + +env: + - GO111MODULE=off + +go: + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - master + +matrix: + fast_finish: true + allow_failures: + - go: master + +sudo: false + +script: + - go test -v -cpu=2 + - go test -v -cpu=2 -race + - go test -v -cpu=2 -tags noasm + - go test -v -cpu=2 -race -tags noasm diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/LICENSE new file mode 100644 index 00000000000..bd899d8353d --- /dev/null +++ b/vendor/github.com/pierrec/lz4/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2015, Pierre Curto +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of xxHash nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md
new file mode 100644
index 00000000000..4ee388e81bf
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/README.md
@@ -0,0 +1,90 @@
+# lz4 : LZ4 compression in pure Go
+
+[![GoDoc](https://godoc.org/github.com/pierrec/lz4?status.svg)](https://godoc.org/github.com/pierrec/lz4)
+[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4)
+[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4)
+[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags)
+
+## Overview
+
+This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low level compress and uncompress functions for LZ4 data blocks.
+The implementation is based on the reference C [one](https://github.com/lz4/lz4).
+
+## Install
+
+Assuming you have the go toolchain installed:
+
+```
+go get github.com/pierrec/lz4
+```
+
+There is a command line interface tool to compress and decompress LZ4 files.
+
+```
+go install github.com/pierrec/lz4/cmd/lz4c
+```
+
+Usage
+
+```
+Usage of lz4c:
+  -version
+        print the program version
+
+Subcommands:
+Compress the given files or from stdin to stdout.
+compress [arguments] [<file name> ...]
+  -bc
+        enable block checksum
+  -l int
+        compression level (0=fastest)
+  -sc
+        disable stream checksum
+  -size string
+        block max size [64K,256K,1M,4M] (default "4M")
+
+Uncompress the given files or from stdin to stdout.
+uncompress [arguments] [<file name> ...]
+
+```
+
+
+## Example
+
+```
+// Compress and uncompress an input string.
+s := "hello world"
+r := strings.NewReader(s)
+
+// The pipe will uncompress the data from the writer.
+pr, pw := io.Pipe()
+zw := lz4.NewWriter(pw)
+zr := lz4.NewReader(pr)
+
+go func() {
+	// Compress the input string.
+	_, _ = io.Copy(zw, r)
+	_ = zw.Close() // Make sure the writer is closed
+	_ = pw.Close() // Terminate the pipe
+}()
+
+_, _ = io.Copy(os.Stdout, zr)
+
+// Output:
+// hello world
+```
+
+## Contributing
+
+Contributions are very welcome for bug fixing, performance improvements...!
+
+- Open an issue with a proper description
+- Send a pull request with appropriate test case(s)
+
+## Contributors
+
+Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far!
+
+Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder.
+
+Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code.
diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go
new file mode 100644
index 00000000000..664d9be580d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/block.go
@@ -0,0 +1,413 @@
+package lz4
+
+import (
+ "encoding/binary"
+ "math/bits"
+ "sync"
+)
+
+// blockHash hashes the lower 6 bytes into a value < htSize.
+func blockHash(x uint64) uint32 {
+ const prime6bytes = 227718039650203
+ return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog))
+}
+
+// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible.
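+// A caller would typically size the destination buffer for CompressBlock
+// with it, e.g. (illustrative sketch):
+//
+//	dst := make([]byte, CompressBlockBound(len(src)))
+//	n, err := CompressBlock(src, dst, nil) // nil: CompressBlock allocates its own hash table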
+func CompressBlockBound(n int) int { + return n + n/255 + 16 +} + +// UncompressBlock uncompresses the source buffer into the destination one, +// and returns the uncompressed size. +// +// The destination buffer must be sized appropriately. +// +// An error is returned if the source data is invalid or the destination buffer is too small. +func UncompressBlock(src, dst []byte) (int, error) { + if len(src) == 0 { + return 0, nil + } + if di := decodeBlock(dst, src); di >= 0 { + return di, nil + } + return 0, ErrInvalidSourceShortBuffer +} + +// CompressBlock compresses the source buffer into the destination one. +// This is the fast version of LZ4 compression and also the default one. +// +// The argument hashTable is scratch space for a hash table used by the +// compressor. If provided, it should have length at least 1<<16. If it is +// shorter (or nil), CompressBlock allocates its own hash table. +// +// The size of the compressed data is returned. +// +// If the destination buffer size is lower than CompressBlockBound and +// the compressed size is 0 and no error, then the data is incompressible. +// +// An error is returned if the destination buffer is too small. +func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) { + defer recoverBlock(&err) + + // Return 0, nil only if the destination buffer size is < CompressBlockBound. + isNotCompressible := len(dst) < CompressBlockBound(len(src)) + + // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. + // This significantly speeds up incompressible data and usually has very small impact on compression. + // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) + const adaptSkipLog = 7 + if len(hashTable) < htSize { + htIface := htPool.Get() + defer htPool.Put(htIface) + hashTable = (*(htIface).(*[htSize]int))[:] + } + // Prove to the compiler the table has at least htSize elements. + // The compiler can see that "uint32() >> hashShift" cannot be out of bounds. + hashTable = hashTable[:htSize] + + // si: Current position of the search. + // anchor: Position of the current literals. + var si, di, anchor int + sn := len(src) - mfLimit + if sn <= 0 { + goto lastLiterals + } + + // Fast scan strategy: the hash table only stores the last 4 bytes sequences. + for si < sn { + // Hash the next 6 bytes (sequence)... + match := binary.LittleEndian.Uint64(src[si:]) + h := blockHash(match) + h2 := blockHash(match >> 8) + + // We check a match at s, s+1 and s+2 and pick the first one we get. + // Checking 3 only requires us to load the source one. + ref := hashTable[h] + ref2 := hashTable[h2] + hashTable[h] = si + hashTable[h2] = si + 1 + offset := si - ref + + // If offset <= 0 we got an old entry in the hash table. + if offset <= 0 || offset >= winSize || // Out of window. + uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches. + // No match. Start calculating another hash. + // The processor can usually do this out-of-order. + h = blockHash(match >> 16) + ref = hashTable[h] + + // Check the second match at si+1 + si += 1 + offset = si - ref2 + + if offset <= 0 || offset >= winSize || + uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) { + // No match. Check the third match at si+2 + si += 1 + offset = si - ref + hashTable[h] = si + + if offset <= 0 || offset >= winSize || + uint32(match>>16) != binary.LittleEndian.Uint32(src[ref:]) { + // Skip one extra byte (at si+3) before we check 3 matches again. 
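+ // (Note: si was already advanced by 2 in the two failed probes above,
+ // so the +2 below resumes at least 4 bytes past the original position,
+ // plus an adaptive skip that grows with the distance since the last
+ // match.)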
+ si += 2 + (si-anchor)>>adaptSkipLog + continue + } + } + } + + // Match found. + lLen := si - anchor // Literal length. + // We already matched 4 bytes. + mLen := 4 + + // Extend backwards if we can, reducing literals. + tOff := si - offset - 1 + for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] { + si-- + tOff-- + lLen-- + mLen++ + } + + // Add the match length, so we continue search at the end. + // Use mLen to store the offset base. + si, mLen = si+mLen, si+minMatch + + // Find the longest match by looking by batches of 8 bytes. + for si+8 < sn { + x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:]) + if x == 0 { + si += 8 + } else { + // Stop is first non-zero byte. + si += bits.TrailingZeros64(x) >> 3 + break + } + } + + mLen = si - mLen + if mLen < 0xF { + dst[di] = byte(mLen) + } else { + dst[di] = 0xF + } + + // Encode literals length. + if lLen < 0xF { + dst[di] |= byte(lLen << 4) + } else { + dst[di] |= 0xF0 + di++ + l := lLen - 0xF + for ; l >= 0xFF; l -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(l) + } + di++ + + // Literals. + copy(dst[di:di+lLen], src[anchor:anchor+lLen]) + di += lLen + 2 + anchor = si + + // Encode offset. + _ = dst[di] // Bound check elimination. + dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) + + // Encode match length part 2. + if mLen >= 0xF { + for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(mLen) + di++ + } + // Check if we can load next values. + if si >= sn { + break + } + // Hash match end-2 + h = blockHash(binary.LittleEndian.Uint64(src[si-2:])) + hashTable[h] = si - 2 + } + +lastLiterals: + if isNotCompressible && anchor == 0 { + // Incompressible. + return 0, nil + } + + // Last literals. + lLen := len(src) - anchor + if lLen < 0xF { + dst[di] = byte(lLen << 4) + } else { + dst[di] = 0xF0 + di++ + for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(lLen) + } + di++ + + // Write the last literals. + if isNotCompressible && di >= anchor { + // Incompressible. + return 0, nil + } + di += copy(dst[di:di+len(src)-anchor], src[anchor:]) + return di, nil +} + +// Pool of hash tables for CompressBlock. +var htPool = sync.Pool{ + New: func() interface{} { + return new([htSize]int) + }, +} + +// blockHash hashes 4 bytes into a value < winSize. +func blockHashHC(x uint32) uint32 { + const hasher uint32 = 2654435761 // Knuth multiplicative hash. + return x * hasher >> (32 - winSizeLog) +} + +// CompressBlockHC compresses the source buffer src into the destination dst +// with max search depth (use 0 or negative value for no max). +// +// CompressBlockHC compression ratio is better than CompressBlock but it is also slower. +// +// The size of the compressed data is returned. +// +// If the destination buffer size is lower than CompressBlockBound and +// the compressed size is 0 and no error, then the data is incompressible. +// +// An error is returned if the destination buffer is too small. +func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) { + defer recoverBlock(&err) + + // Return 0, nil only if the destination buffer size is < CompressBlockBound. + isNotCompressible := len(dst) < CompressBlockBound(len(src)) + + // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. + // This significantly speeds up incompressible data and usually has very small impact on compression. 
+ // bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
+ const adaptSkipLog = 7
+
+ var si, di, anchor int
+
+ // hashTable: stores the last position found for a given hash
+ // chainTable: stores previous positions for a given hash
+ var hashTable, chainTable [winSize]int
+
+ if depth <= 0 {
+ depth = winSize
+ }
+
+ sn := len(src) - mfLimit
+ if sn <= 0 {
+ goto lastLiterals
+ }
+
+ for si < sn {
+ // Hash the next 4 bytes (sequence).
+ match := binary.LittleEndian.Uint32(src[si:])
+ h := blockHashHC(match)
+
+ // Follow the chain until out of window and give the longest match.
+ mLen := 0
+ offset := 0
+ for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] {
+ // The first (mLen==0) or next byte (mLen>=minMatch) at current match length
+ // must match to improve on the match length.
+ if src[next+mLen] != src[si+mLen] {
+ continue
+ }
+ ml := 0
+ // Compare the current position with a previous with the same hash.
+ for ml < sn-si {
+ x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:])
+ if x == 0 {
+ ml += 8
+ } else {
+ // Stop is first non-zero byte.
+ ml += bits.TrailingZeros64(x) >> 3
+ break
+ }
+ }
+ if ml < minMatch || ml <= mLen {
+ // Match too small (<minMatch) or smaller than the current match.
+ continue
+ }
+ // Found a longer match, keep its position and length.
+ mLen = ml
+ offset = si - next
+ // Try another previous position with the same hash.
+ try--
+ }
+ chainTable[si&winMask] = hashTable[h]
+ hashTable[h] = si
+
+ // No match found.
+ if mLen == 0 {
+ si += 1 + (si-anchor)>>adaptSkipLog
+ continue
+ }
+
+ // Match found.
+ // Update hash/chain tables with overlapping bytes:
+ // si already hashed, add everything from si+1 up to the match length.
+ winStart := si + 1
+ if ws := si + mLen - winSize; ws > winStart {
+ winStart = ws
+ }
+ for si, ml := winStart, si+mLen; si < ml; {
+ match >>= 8
+ match |= uint32(src[si+3]) << 24
+ h := blockHashHC(match)
+ chainTable[si&winMask] = hashTable[h]
+ hashTable[h] = si
+ si++
+ }
+
+ lLen := si - anchor
+ si += mLen
+ mLen -= minMatch // Match length does not include minMatch.
+
+ if mLen < 0xF {
+ dst[di] = byte(mLen)
+ } else {
+ dst[di] = 0xF
+ }
+
+ // Encode literals length.
+ if lLen < 0xF {
+ dst[di] |= byte(lLen << 4)
+ } else {
+ dst[di] |= 0xF0
+ di++
+ l := lLen - 0xF
+ for ; l >= 0xFF; l -= 0xFF {
+ dst[di] = 0xFF
+ di++
+ }
+ dst[di] = byte(l)
+ }
+ di++
+
+ // Literals.
+ copy(dst[di:di+lLen], src[anchor:anchor+lLen])
+ di += lLen
+ anchor = si
+
+ // Encode offset.
+ di += 2
+ dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+ // Encode match length part 2.
+ if mLen >= 0xF {
+ for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
+ dst[di] = 0xFF
+ di++
+ }
+ dst[di] = byte(mLen)
+ di++
+ }
+ }
+
+ if isNotCompressible && anchor == 0 {
+ // Incompressible.
+ return 0, nil
+ }
+
+ // Last literals.
+lastLiterals:
+ lLen := len(src) - anchor
+ if lLen < 0xF {
+ dst[di] = byte(lLen << 4)
+ } else {
+ dst[di] = 0xF0
+ di++
+ lLen -= 0xF
+ for ; lLen >= 0xFF; lLen -= 0xFF {
+ dst[di] = 0xFF
+ di++
+ }
+ dst[di] = byte(lLen)
+ }
+ di++
+
+ // Write the last literals.
+ if isNotCompressible && di >= anchor {
+ // Incompressible.
+ return 0, nil + } + di += copy(dst[di:di+len(src)-anchor], src[anchor:]) + return di, nil +} diff --git a/vendor/github.com/pierrec/lz4/debug.go b/vendor/github.com/pierrec/lz4/debug.go new file mode 100644 index 00000000000..bc5e78d40f0 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/debug.go @@ -0,0 +1,23 @@ +// +build lz4debug + +package lz4 + +import ( + "fmt" + "os" + "path/filepath" + "runtime" +) + +const debugFlag = true + +func debug(args ...interface{}) { + _, file, line, _ := runtime.Caller(1) + file = filepath.Base(file) + + f := fmt.Sprintf("LZ4: %s:%d %s", file, line, args[0]) + if f[len(f)-1] != '\n' { + f += "\n" + } + fmt.Fprintf(os.Stderr, f, args[1:]...) +} diff --git a/vendor/github.com/pierrec/lz4/debug_stub.go b/vendor/github.com/pierrec/lz4/debug_stub.go new file mode 100644 index 00000000000..44211ad9645 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/debug_stub.go @@ -0,0 +1,7 @@ +// +build !lz4debug + +package lz4 + +const debugFlag = false + +func debug(args ...interface{}) {} diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.go b/vendor/github.com/pierrec/lz4/decode_amd64.go new file mode 100644 index 00000000000..43cc14fbe2e --- /dev/null +++ b/vendor/github.com/pierrec/lz4/decode_amd64.go @@ -0,0 +1,8 @@ +// +build !appengine +// +build gc +// +build !noasm + +package lz4 + +//go:noescape +func decodeBlock(dst, src []byte) int diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.s b/vendor/github.com/pierrec/lz4/decode_amd64.s new file mode 100644 index 00000000000..20fef39759c --- /dev/null +++ b/vendor/github.com/pierrec/lz4/decode_amd64.s @@ -0,0 +1,375 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// AX scratch +// BX scratch +// CX scratch +// DX token +// +// DI &dst +// SI &src +// R8 &dst + len(dst) +// R9 &src + len(src) +// R11 &dst +// R12 short output end +// R13 short input end +// func decodeBlock(dst, src []byte) int +// using 50 bytes of stack currently +TEXT ·decodeBlock(SB), NOSPLIT, $64-56 + MOVQ dst_base+0(FP), DI + MOVQ DI, R11 + MOVQ dst_len+8(FP), R8 + ADDQ DI, R8 + + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R9 + ADDQ SI, R9 + + // shortcut ends + // short output end + MOVQ R8, R12 + SUBQ $32, R12 + // short input end + MOVQ R9, R13 + SUBQ $16, R13 + +loop: + // for si < len(src) + CMPQ SI, R9 + JGE end + + // token := uint32(src[si]) + MOVBQZX (SI), DX + INCQ SI + + // lit_len = token >> 4 + // if lit_len > 0 + // CX = lit_len + MOVQ DX, CX + SHRQ $4, CX + + // if lit_len != 0xF + CMPQ CX, $0xF + JEQ lit_len_loop_pre + CMPQ DI, R12 + JGE lit_len_loop_pre + CMPQ SI, R13 + JGE lit_len_loop_pre + + // copy shortcut + + // A two-stage shortcut for the most common case: + // 1) If the literal length is 0..14, and there is enough space, + // enter the shortcut and copy 16 bytes on behalf of the literals + // (in the fast mode, only 8 bytes can be safely copied this way). + // 2) Further if the match length is 4..18, copy 18 bytes in a similar + // manner; but we ensure that there's enough space in the output for + // those 18 bytes earlier, upon entering the shortcut (in other words, + // there is a combined check for both stages). + + // copy literal + MOVOU (SI), X0 + MOVOU X0, (DI) + ADDQ CX, DI + ADDQ CX, SI + + MOVQ DX, CX + ANDQ $0xF, CX + + // The second stage: prepare for match copying, decode full info. + // If it doesn't work out, the info won't be wasted. 
+ // offset := uint16(data[:2]) + MOVWQZX (SI), DX + ADDQ $2, SI + + MOVQ DI, AX + SUBQ DX, AX + CMPQ AX, DI + JGT err_short_buf + + // if we can't do the second stage then jump straight to read the + // match length, we already have the offset. + CMPQ CX, $0xF + JEQ match_len_loop_pre + CMPQ DX, $8 + JLT match_len_loop_pre + CMPQ AX, R11 + JLT err_short_buf + + // memcpy(op + 0, match + 0, 8); + MOVQ (AX), BX + MOVQ BX, (DI) + // memcpy(op + 8, match + 8, 8); + MOVQ 8(AX), BX + MOVQ BX, 8(DI) + // memcpy(op +16, match +16, 2); + MOVW 16(AX), BX + MOVW BX, 16(DI) + + ADDQ $4, DI // minmatch + ADDQ CX, DI + + // shortcut complete, load next token + JMP loop + +lit_len_loop_pre: + // if lit_len > 0 + CMPQ CX, $0 + JEQ offset + CMPQ CX, $0xF + JNE copy_literal + +lit_len_loop: + // for src[si] == 0xFF + CMPB (SI), $0xFF + JNE lit_len_finalise + + // bounds check src[si+1] + MOVQ SI, AX + ADDQ $1, AX + CMPQ AX, R9 + JGT err_short_buf + + // lit_len += 0xFF + ADDQ $0xFF, CX + INCQ SI + JMP lit_len_loop + +lit_len_finalise: + // lit_len += int(src[si]) + // si++ + MOVBQZX (SI), AX + ADDQ AX, CX + INCQ SI + +copy_literal: + // bounds check src and dst + MOVQ SI, AX + ADDQ CX, AX + CMPQ AX, R9 + JGT err_short_buf + + MOVQ DI, AX + ADDQ CX, AX + CMPQ AX, R8 + JGT err_short_buf + + // whats a good cut off to call memmove? + CMPQ CX, $16 + JGT memmove_lit + + // if len(dst[di:]) < 16 + MOVQ R8, AX + SUBQ DI, AX + CMPQ AX, $16 + JLT memmove_lit + + // if len(src[si:]) < 16 + MOVQ R9, AX + SUBQ SI, AX + CMPQ AX, $16 + JLT memmove_lit + + MOVOU (SI), X0 + MOVOU X0, (DI) + + JMP finish_lit_copy + +memmove_lit: + // memmove(to, from, len) + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + // spill + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) // need len to inc SI, DI after + MOVB DX, 48(SP) + CALL runtime·memmove(SB) + + // restore registers + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVB 48(SP), DX + + // recalc initial values + MOVQ dst_base+0(FP), R8 + MOVQ R8, R11 + ADDQ dst_len+8(FP), R8 + MOVQ src_base+24(FP), R9 + ADDQ src_len+32(FP), R9 + MOVQ R8, R12 + SUBQ $32, R12 + MOVQ R9, R13 + SUBQ $16, R13 + +finish_lit_copy: + ADDQ CX, SI + ADDQ CX, DI + + CMPQ SI, R9 + JGE end + +offset: + // CX := mLen + // free up DX to use for offset + MOVQ DX, CX + + MOVQ SI, AX + ADDQ $2, AX + CMPQ AX, R9 + JGT err_short_buf + + // offset + // DX := int(src[si]) | int(src[si+1])<<8 + MOVWQZX (SI), DX + ADDQ $2, SI + + // 0 offset is invalid + CMPQ DX, $0 + JEQ err_corrupt + + ANDB $0xF, CX + +match_len_loop_pre: + // if mlen != 0xF + CMPB CX, $0xF + JNE copy_match + +match_len_loop: + // for src[si] == 0xFF + // lit_len += 0xFF + CMPB (SI), $0xFF + JNE match_len_finalise + + // bounds check src[si+1] + MOVQ SI, AX + ADDQ $1, AX + CMPQ AX, R9 + JGT err_short_buf + + ADDQ $0xFF, CX + INCQ SI + JMP match_len_loop + +match_len_finalise: + // lit_len += int(src[si]) + // si++ + MOVBQZX (SI), AX + ADDQ AX, CX + INCQ SI + +copy_match: + // mLen += minMatch + ADDQ $4, CX + + // check we have match_len bytes left in dst + // di+match_len < len(dst) + MOVQ DI, AX + ADDQ CX, AX + CMPQ AX, R8 + JGT err_short_buf + + // DX = offset + // CX = match_len + // BX = &dst + (di - offset) + MOVQ DI, BX + SUBQ DX, BX + + // check BX is within dst + // if BX < &dst + CMPQ BX, R11 + JLT err_short_buf + + // if offset + match_len < di + MOVQ BX, AX + ADDQ CX, AX + CMPQ DI, AX + JGT copy_interior_match + + // AX := len(dst[:di]) + // MOVQ DI, AX + // SUBQ R11, AX + + // copy 16 bytes at a time + // if di-offset < 16 
copy 16-(di-offset) bytes to di + // then do the remaining + +copy_match_loop: + // for match_len >= 0 + // dst[di] = dst[i] + // di++ + // i++ + MOVB (BX), AX + MOVB AX, (DI) + INCQ DI + INCQ BX + DECQ CX + + CMPQ CX, $0 + JGT copy_match_loop + + JMP loop + +copy_interior_match: + CMPQ CX, $16 + JGT memmove_match + + // if len(dst[di:]) < 16 + MOVQ R8, AX + SUBQ DI, AX + CMPQ AX, $16 + JLT memmove_match + + MOVOU (BX), X0 + MOVOU X0, (DI) + + ADDQ CX, DI + JMP loop + +memmove_match: + // memmove(to, from, len) + MOVQ DI, 0(SP) + MOVQ BX, 8(SP) + MOVQ CX, 16(SP) + // spill + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) // need len to inc SI, DI after + CALL runtime·memmove(SB) + + // restore registers + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + + // recalc initial values + MOVQ dst_base+0(FP), R8 + MOVQ R8, R11 // TODO: make these sensible numbers + ADDQ dst_len+8(FP), R8 + MOVQ src_base+24(FP), R9 + ADDQ src_len+32(FP), R9 + MOVQ R8, R12 + SUBQ $32, R12 + MOVQ R9, R13 + SUBQ $16, R13 + + ADDQ CX, DI + JMP loop + +err_corrupt: + MOVQ $-1, ret+48(FP) + RET + +err_short_buf: + MOVQ $-2, ret+48(FP) + RET + +end: + SUBQ R11, DI + MOVQ DI, ret+48(FP) + RET diff --git a/vendor/github.com/pierrec/lz4/decode_other.go b/vendor/github.com/pierrec/lz4/decode_other.go new file mode 100644 index 00000000000..919888edf7d --- /dev/null +++ b/vendor/github.com/pierrec/lz4/decode_other.go @@ -0,0 +1,98 @@ +// +build !amd64 appengine !gc noasm + +package lz4 + +func decodeBlock(dst, src []byte) (ret int) { + const hasError = -2 + defer func() { + if recover() != nil { + ret = hasError + } + }() + + var si, di int + for { + // Literals and match lengths (token). + b := int(src[si]) + si++ + + // Literals. + if lLen := b >> 4; lLen > 0 { + switch { + case lLen < 0xF && si+16 < len(src): + // Shortcut 1 + // if we have enough room in src and dst, and the literals length + // is small enough (0..14) then copy all 16 bytes, even if not all + // are part of the literals. + copy(dst[di:], src[si:si+16]) + si += lLen + di += lLen + if mLen := b & 0xF; mLen < 0xF { + // Shortcut 2 + // if the match length (4..18) fits within the literals, then copy + // all 18 bytes, even if not all are part of the literals. + mLen += 4 + if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset { + i := di - offset + end := i + 18 + if end > len(dst) { + // The remaining buffer may not hold 18 bytes. + // See https://github.com/pierrec/lz4/issues/51. + end = len(dst) + } + copy(dst[di:], dst[i:end]) + si += 2 + di += mLen + continue + } + } + case lLen == 0xF: + for src[si] == 0xFF { + lLen += 0xFF + si++ + } + lLen += int(src[si]) + si++ + fallthrough + default: + copy(dst[di:di+lLen], src[si:si+lLen]) + si += lLen + di += lLen + } + } + if si >= len(src) { + return di + } + + offset := int(src[si]) | int(src[si+1])<<8 + if offset == 0 { + return hasError + } + si += 2 + + // Match. + mLen := b & 0xF + if mLen == 0xF { + for src[si] == 0xFF { + mLen += 0xFF + si++ + } + mLen += int(src[si]) + si++ + } + mLen += minMatch + + // Copy the match. + expanded := dst[di-offset:] + if mLen > offset { + // Efficiently copy the match dst[di-offset:di] into the dst slice. 
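+ // (Note: when mLen > offset the match overlaps the output being
+ // produced; e.g. with offset 1 and mLen 8, the doubling loop below
+ // replicates the one-byte period in copies of 1, 2, 4 and 8 bytes.)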
+ bytesToCopy := offset * (mLen / offset)
+ for n := offset; n <= bytesToCopy+offset; n *= 2 {
+ copy(expanded[n:], expanded[:n])
+ }
+ di += bytesToCopy
+ mLen -= bytesToCopy
+ }
+ di += copy(dst[di:di+mLen], expanded[:mLen])
+ }
+}
diff --git a/vendor/github.com/pierrec/lz4/errors.go b/vendor/github.com/pierrec/lz4/errors.go
new file mode 100644
index 00000000000..1c45d1813ce
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/errors.go
@@ -0,0 +1,30 @@
+package lz4
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ rdebug "runtime/debug"
+)
+
+var (
+ // ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBlock when a compressed
+ // block is corrupted or the destination buffer is not large enough for the uncompressed data.
+ ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short")
+ // ErrInvalid is returned when reading an invalid LZ4 archive.
+ ErrInvalid = errors.New("lz4: bad magic number")
+ // ErrBlockDependency is returned when attempting to decompress an archive created with block dependency.
+ ErrBlockDependency = errors.New("lz4: block dependency not supported")
+ // ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position.
+ ErrUnsupportedSeek = errors.New("lz4: can only seek forward from io.SeekCurrent")
+)
+
+func recoverBlock(e *error) {
+ if r := recover(); r != nil && *e == nil {
+ if debugFlag {
+ fmt.Fprintln(os.Stderr, r)
+ rdebug.PrintStack()
+ }
+ *e = ErrInvalidSourceShortBuffer
+ }
+}
diff --git a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go
new file mode 100644
index 00000000000..7a76a6bce2b
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go
@@ -0,0 +1,223 @@
+// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version).
+// (https://github.com/Cyan4973/XXH/)
+package xxh32
+
+import (
+ "encoding/binary"
+)
+
+const (
+ prime1 uint32 = 2654435761
+ prime2 uint32 = 2246822519
+ prime3 uint32 = 3266489917
+ prime4 uint32 = 668265263
+ prime5 uint32 = 374761393
+
+ primeMask = 0xFFFFFFFF
+ prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984
+ prime1minus = uint32((-int64(prime1)) & primeMask) // 1640531535
+)
+
+// XXHZero represents an xxhash32 object with seed 0.
+type XXHZero struct {
+ v1 uint32
+ v2 uint32
+ v3 uint32
+ v4 uint32
+ totalLen uint64
+ buf [16]byte
+ bufused int
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+func (xxh XXHZero) Sum(b []byte) []byte {
+ h32 := xxh.Sum32()
+ return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24))
+}
+
+// Reset resets the Hash to its initial state.
+func (xxh *XXHZero) Reset() {
+ xxh.v1 = prime1plus2
+ xxh.v2 = prime2
+ xxh.v3 = 0
+ xxh.v4 = prime1minus
+ xxh.totalLen = 0
+ xxh.bufused = 0
+}
+
+// Size returns the number of bytes returned by Sum().
+func (xxh *XXHZero) Size() int {
+ return 4
+}
+
+// BlockSize gives the minimum number of bytes accepted by Write().
+func (xxh *XXHZero) BlockSize() int {
+ return 1
+}
+
+// Write adds input bytes to the Hash.
+// It never returns an error.
+func (xxh *XXHZero) Write(input []byte) (int, error) { + if xxh.totalLen == 0 { + xxh.Reset() + } + n := len(input) + m := xxh.bufused + + xxh.totalLen += uint64(n) + + r := len(xxh.buf) - m + if n < r { + copy(xxh.buf[m:], input) + xxh.bufused += len(input) + return n, nil + } + + p := 0 + // Causes compiler to work directly from registers instead of stack: + v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4 + if m > 0 { + // some data left from previous update + copy(xxh.buf[xxh.bufused:], input[:r]) + xxh.bufused += len(input) - r + + // fast rotl(13) + buf := xxh.buf[:16] // BCE hint. + v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1 + p = r + xxh.bufused = 0 + } + + for n := n - 16; p <= n; p += 16 { + sub := input[p:][:16] //BCE hint for compiler + v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 + } + xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4 + + copy(xxh.buf[xxh.bufused:], input[p:]) + xxh.bufused += len(input) - p + + return n, nil +} + +// Sum32 returns the 32 bits Hash value. +func (xxh *XXHZero) Sum32() uint32 { + h32 := uint32(xxh.totalLen) + if h32 >= 16 { + h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4) + } else { + h32 += prime5 + } + + p := 0 + n := xxh.bufused + buf := xxh.buf + for n := n - 4; p <= n; p += 4 { + h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3 + h32 = rol17(h32) * prime4 + } + for ; p < n; p++ { + h32 += uint32(buf[p]) * prime5 + h32 = rol11(h32) * prime1 + } + + h32 ^= h32 >> 15 + h32 *= prime2 + h32 ^= h32 >> 13 + h32 *= prime3 + h32 ^= h32 >> 16 + + return h32 +} + +// ChecksumZero returns the 32bits Hash value. +func ChecksumZero(input []byte) uint32 { + n := len(input) + h32 := uint32(n) + + if n < 16 { + h32 += prime5 + } else { + v1 := prime1plus2 + v2 := prime2 + v3 := uint32(0) + v4 := prime1minus + p := 0 + for n := n - 16; p <= n; p += 16 { + sub := input[p:][:16] //BCE hint for compiler + v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 + } + input = input[p:] + n -= p + h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + } + + p := 0 + for n := n - 4; p <= n; p += 4 { + h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3 + h32 = rol17(h32) * prime4 + } + for p < n { + h32 += uint32(input[p]) * prime5 + h32 = rol11(h32) * prime1 + p++ + } + + h32 ^= h32 >> 15 + h32 *= prime2 + h32 ^= h32 >> 13 + h32 *= prime3 + h32 ^= h32 >> 16 + + return h32 +} + +// Uint32Zero hashes x with seed 0. 
+func Uint32Zero(x uint32) uint32 {
+ h := prime5 + 4 + x*prime3
+ h = rol17(h) * prime4
+ h ^= h >> 15
+ h *= prime2
+ h ^= h >> 13
+ h *= prime3
+ h ^= h >> 16
+ return h
+}
+
+func rol1(u uint32) uint32 {
+ return u<<1 | u>>31
+}
+
+func rol7(u uint32) uint32 {
+ return u<<7 | u>>25
+}
+
+func rol11(u uint32) uint32 {
+ return u<<11 | u>>21
+}
+
+func rol12(u uint32) uint32 {
+ return u<<12 | u>>20
+}
+
+func rol13(u uint32) uint32 {
+ return u<<13 | u>>19
+}
+
+func rol17(u uint32) uint32 {
+ return u<<17 | u>>15
+}
+
+func rol18(u uint32) uint32 {
+ return u<<18 | u>>14
+}
diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go
new file mode 100644
index 00000000000..a3284bdf708
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/lz4.go
@@ -0,0 +1,116 @@
+// Package lz4 implements reading and writing lz4 compressed data (a frame),
+// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html.
+//
+// Although the block level compression and decompression functions are exposed and are fully compatible
+// with the lz4 block format definition, they are low level and should not be used directly.
+// For a complete description of an lz4 compressed block, see:
+// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
+//
+// See https://github.com/Cyan4973/lz4 for the reference C implementation.
+//
+package lz4
+
+import (
+ "math/bits"
+ "sync"
+)
+
+const (
+ // Extension is the LZ4 frame file name extension
+ Extension = ".lz4"
+ // Version is the LZ4 frame format version
+ Version = 1
+
+ frameMagic uint32 = 0x184D2204
+ frameSkipMagic uint32 = 0x184D2A50
+ frameMagicLegacy uint32 = 0x184C2102
+
+ // The following constants are used to setup the compression algorithm.
+ minMatch = 4 // the minimum size of the match sequence size (4 bytes)
+ winSizeLog = 16 // LZ4 64Kb window size limit
+ winSize = 1 << winSizeLog
+ winMask = winSize - 1 // 64Kb window of previous data for dependent blocks
+ compressedBlockFlag = 1 << 31
+ compressedBlockMask = compressedBlockFlag - 1
+
+ // hashLog determines the size of the hash table used to quickly find a previous match position.
+ // Its value influences the compression speed and memory usage, the lower the faster,
+ // but at the expense of the compression ratio.
+ // 16 seems to be the best compromise for fast compression.
+ hashLog = 16
+ htSize = 1 << hashLog
+
+ mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes.
+)
+
+// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb.
+const (
+ blockSize64K = 1 << (16 + 2*iota)
+ blockSize256K
+ blockSize1M
+ blockSize4M
+)
+
+var (
+ // Keep a pool of buffers for each valid block size.
+ bsMapValue = [...]*sync.Pool{
+ newBufferPool(2 * blockSize64K),
+ newBufferPool(2 * blockSize256K),
+ newBufferPool(2 * blockSize1M),
+ newBufferPool(2 * blockSize4M),
+ }
+)
+
+// newBufferPool returns a pool for buffers of the given size.
+func newBufferPool(size int) *sync.Pool {
+ return &sync.Pool{
+ New: func() interface{} {
+ return make([]byte, size)
+ },
+ }
+}
+
+// getBuffer retrieves a buffer of the given size from its pool.
+func getBuffer(size int) []byte {
+ idx := blockSizeValueToIndex(size) - 4
+ return bsMapValue[idx].Get().([]byte)
+}
+
+// putBuffer returns a buffer to its pool.
+func putBuffer(size int, buf []byte) {
+ if cap(buf) > 0 {
+ idx := blockSizeValueToIndex(size) - 4
+ bsMapValue[idx].Put(buf[:cap(buf)])
+ }
+}
+func blockSizeIndexToValue(i byte) int {
+ return 1 << (16 + 2*uint(i))
+}
+func isValidBlockSize(size int) bool {
+ const blockSizeMask = blockSize64K | blockSize256K | blockSize1M | blockSize4M
+
+ return size&blockSizeMask > 0 && bits.OnesCount(uint(size)) == 1
+}
+func blockSizeValueToIndex(size int) byte {
+ return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2)
+}
+
+// Header describes the various flags that can be set on a Writer or obtained from a Reader.
+// The default values match those of the LZ4 frame format definition
+// (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html).
+//
+// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls.
+// It is the caller's responsibility to check them if necessary.
+type Header struct {
+ BlockChecksum bool // Compressed blocks checksum flag.
+ NoChecksum bool // Frame checksum flag.
+ BlockMaxSize int // Size of the uncompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB.
+ Size uint64 // Frame total size. It is _not_ computed by the Writer.
+ CompressionLevel int // Compression level (higher is better, use 0 for fastest compression).
+ done bool // Header processed flag (Read or Write and checked).
+}
+
+// Reset resets the Header to its initial state.
+func (h *Header) Reset() {
+ h.done = false
+}
diff --git a/vendor/github.com/pierrec/lz4/lz4_go1.10.go b/vendor/github.com/pierrec/lz4/lz4_go1.10.go
new file mode 100644
index 00000000000..9a0fb00709d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/lz4_go1.10.go
@@ -0,0 +1,29 @@
+//+build go1.10
+
+package lz4
+
+import (
+ "fmt"
+ "strings"
+)
+
+func (h Header) String() string {
+ var s strings.Builder
+
+ s.WriteString(fmt.Sprintf("%T{", h))
+ if h.BlockChecksum {
+ s.WriteString("BlockChecksum: true ")
+ }
+ if h.NoChecksum {
+ s.WriteString("NoChecksum: true ")
+ }
+ if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
+ s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs))
+ }
+ if l := h.CompressionLevel; l != 0 {
+ s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l))
+ }
+ s.WriteByte('}')
+
+ return s.String()
+}
diff --git a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go
new file mode 100644
index 00000000000..12c761a2e7f
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go
@@ -0,0 +1,29 @@
+//+build !go1.10
+
+package lz4
+
+import (
+ "bytes"
+ "fmt"
+)
+
+func (h Header) String() string {
+ var s bytes.Buffer
+
+ s.WriteString(fmt.Sprintf("%T{", h))
+ if h.BlockChecksum {
+ s.WriteString("BlockChecksum: true ")
+ }
+ if h.NoChecksum {
+ s.WriteString("NoChecksum: true ")
+ }
+ if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
+ s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs))
+ }
+ if l := h.CompressionLevel; l != 0 {
+ s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l))
+ }
+ s.WriteByte('}')
+
+ return s.String()
+}
diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go
new file mode 100644
index 00000000000..87dd72bd0db
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/reader.go
@@ -0,0 +1,335 @@
+package lz4
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "io/ioutil"
+
+ "github.com/pierrec/lz4/internal/xxh32"
+)
+
+// Reader implements the LZ4 frame decoder.
+// The Header is set after the first call to Read().
+// The Header may change between Read() calls in case of concatenated frames.
+type Reader struct {
+ Header
+ // Handler called when a block has been successfully read.
+ // It provides the number of bytes read.
+ OnBlockDone func(size int)
+
+ buf [8]byte // Scrap buffer.
+ pos int64 // Current position in src.
+ src io.Reader // Source.
+ zdata []byte // Compressed data.
+ data []byte // Uncompressed data.
+ idx int // Index of unread bytes into data.
+ checksum xxh32.XXHZero // Frame hash.
+ skip int64 // Bytes to skip before next read.
+ dpos int64 // Position in dest
+}
+
+// NewReader returns a new LZ4 frame decoder.
+// No access to the underlying io.Reader is performed.
+func NewReader(src io.Reader) *Reader {
+ r := &Reader{src: src}
+ return r
+}
+
+// readHeader checks the frame magic number and parses the frame descriptor.
+// Skippable frames are supported even as a first frame although the LZ4
+// specification recommends that skippable frames not be used as first frames.
+func (z *Reader) readHeader(first bool) error {
+ defer z.checksum.Reset()
+
+ buf := z.buf[:]
+ for {
+ magic, err := z.readUint32()
+ if err != nil {
+ z.pos += 4
+ if !first && err == io.ErrUnexpectedEOF {
+ return io.EOF
+ }
+ return err
+ }
+ if magic == frameMagic {
+ break
+ }
+ if magic>>8 != frameSkipMagic>>8 {
+ return ErrInvalid
+ }
+ skipSize, err := z.readUint32()
+ if err != nil {
+ return err
+ }
+ z.pos += 4
+ m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize))
+ if err != nil {
+ return err
+ }
+ z.pos += m
+ }
+
+ // Header.
+ if _, err := io.ReadFull(z.src, buf[:2]); err != nil {
+ return err
+ }
+ z.pos += 8
+
+ b := buf[0]
+ if v := b >> 6; v != Version {
+ return fmt.Errorf("lz4: invalid version: got %d; expected %d", v, Version)
+ }
+ if b>>5&1 == 0 {
+ return ErrBlockDependency
+ }
+ z.BlockChecksum = b>>4&1 > 0
+ frameSize := b>>3&1 > 0
+ z.NoChecksum = b>>2&1 == 0
+
+ bmsID := buf[1] >> 4 & 0x7
+ if bmsID < 4 || bmsID > 7 {
+ return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID)
+ }
+ bSize := blockSizeIndexToValue(bmsID - 4)
+ z.BlockMaxSize = bSize
+
+ // Allocate the compressed/uncompressed buffers.
+ // The compressed buffer cannot exceed the uncompressed one.
+ if n := 2 * bSize; cap(z.zdata) < n {
+ z.zdata = make([]byte, n, n)
+ }
+ if debugFlag {
+ debug("header block max size id=%d size=%d", bmsID, bSize)
+ }
+ z.zdata = z.zdata[:bSize]
+ z.data = z.zdata[:cap(z.zdata)][bSize:]
+ z.idx = len(z.data)
+
+ _, _ = z.checksum.Write(buf[0:2])
+
+ if frameSize {
+ buf := buf[:8]
+ if _, err := io.ReadFull(z.src, buf); err != nil {
+ return err
+ }
+ z.Size = binary.LittleEndian.Uint64(buf)
+ z.pos += 8
+ _, _ = z.checksum.Write(buf)
+ }
+
+ // Header checksum.
+ if _, err := io.ReadFull(z.src, buf[:1]); err != nil {
+ return err
+ }
+ z.pos++
+ if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] {
+ return fmt.Errorf("lz4: invalid header checksum: got %x; expected %x", buf[0], h)
+ }
+
+ z.Header.done = true
+ if debugFlag {
+ debug("header read: %v", z.Header)
+ }
+
+ return nil
+}
+
+// Read decompresses data from the underlying source into the supplied buffer.
+//
+// Since there can be multiple streams concatenated, Header values may
+// change between calls to Read(). If that is the case, no data is actually read from
+// the underlying io.Reader, to allow for potential input buffer resizing.
+func (z *Reader) Read(buf []byte) (int, error) { + if debugFlag { + debug("Read buf len=%d", len(buf)) + } + if !z.Header.done { + if err := z.readHeader(true); err != nil { + return 0, err + } + if debugFlag { + debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d", + len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx) + } + } + + if len(buf) == 0 { + return 0, nil + } + + if z.idx == len(z.data) { + // No data ready for reading, process the next block. + if debugFlag { + debug("reading block from writer") + } + // Reset uncompressed buffer + z.data = z.zdata[:cap(z.zdata)][len(z.zdata):] + + // Block length: 0 = end of frame, highest bit set: uncompressed. + bLen, err := z.readUint32() + if err != nil { + return 0, err + } + z.pos += 4 + + if bLen == 0 { + // End of frame reached. + if !z.NoChecksum { + // Validate the frame checksum. + checksum, err := z.readUint32() + if err != nil { + return 0, err + } + if debugFlag { + debug("frame checksum got=%x / want=%x", z.checksum.Sum32(), checksum) + } + z.pos += 4 + if h := z.checksum.Sum32(); checksum != h { + return 0, fmt.Errorf("lz4: invalid frame checksum: got %x; expected %x", h, checksum) + } + } + + // Get ready for the next concatenated frame and keep the position. + pos := z.pos + z.Reset(z.src) + z.pos = pos + + // Since multiple frames can be concatenated, check for more. + return 0, z.readHeader(false) + } + + if debugFlag { + debug("raw block size %d", bLen) + } + if bLen&compressedBlockFlag > 0 { + // Uncompressed block. + bLen &= compressedBlockMask + if debugFlag { + debug("uncompressed block size %d", bLen) + } + if int(bLen) > cap(z.data) { + return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) + } + z.data = z.data[:bLen] + if _, err := io.ReadFull(z.src, z.data); err != nil { + return 0, err + } + z.pos += int64(bLen) + if z.OnBlockDone != nil { + z.OnBlockDone(int(bLen)) + } + + if z.BlockChecksum { + checksum, err := z.readUint32() + if err != nil { + return 0, err + } + z.pos += 4 + + if h := xxh32.ChecksumZero(z.data); h != checksum { + return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) + } + } + + } else { + // Compressed block. + if debugFlag { + debug("compressed block size %d", bLen) + } + if int(bLen) > cap(z.data) { + return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) + } + zdata := z.zdata[:bLen] + if _, err := io.ReadFull(z.src, zdata); err != nil { + return 0, err + } + z.pos += int64(bLen) + + if z.BlockChecksum { + checksum, err := z.readUint32() + if err != nil { + return 0, err + } + z.pos += 4 + + if h := xxh32.ChecksumZero(zdata); h != checksum { + return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) + } + } + + n, err := UncompressBlock(zdata, z.data) + if err != nil { + return 0, err + } + z.data = z.data[:n] + if z.OnBlockDone != nil { + z.OnBlockDone(n) + } + } + + if !z.NoChecksum { + _, _ = z.checksum.Write(z.data) + if debugFlag { + debug("current frame checksum %x", z.checksum.Sum32()) + } + } + z.idx = 0 + } + + if z.skip > int64(len(z.data[z.idx:])) { + z.skip -= int64(len(z.data[z.idx:])) + z.dpos += int64(len(z.data[z.idx:])) + z.idx = len(z.data) + return 0, nil + } + + z.idx += int(z.skip) + z.dpos += z.skip + z.skip = 0 + + n := copy(buf, z.data[z.idx:]) + z.idx += n + z.dpos += int64(n) + if debugFlag { + debug("copied %d bytes to input", n) + } + + return n, nil +} + +// Seek implements io.Seeker, but supports seeking forward from the current +// position only. 
Any other seek will return an error. Allows skipping output
+// bytes which aren't needed, which in some scenarios is faster than reading
+// and discarding them.
+// Note this may cause future calls to Read() to read 0 bytes if all of the
+// data they would have returned is skipped.
+func (z *Reader) Seek(offset int64, whence int) (int64, error) {
+ if offset < 0 || whence != io.SeekCurrent {
+ return z.dpos + z.skip, ErrUnsupportedSeek
+ }
+ z.skip += offset
+ return z.dpos + z.skip, nil
+}
+
+// Reset discards the Reader's state and makes it equivalent to the
+// result of its original state from NewReader, but reading from r instead.
+// This permits reusing a Reader rather than allocating a new one.
+func (z *Reader) Reset(r io.Reader) {
+ z.Header = Header{}
+ z.pos = 0
+ z.src = r
+ z.zdata = z.zdata[:0]
+ z.data = z.data[:0]
+ z.idx = 0
+ z.checksum.Reset()
+}
+
+// readUint32 reads a uint32 into the supplied buffer.
+// The idea is to make use of the already allocated buffers avoiding additional allocations.
+func (z *Reader) readUint32() (uint32, error) {
+ buf := z.buf[:4]
+ _, err := io.ReadFull(z.src, buf)
+ x := binary.LittleEndian.Uint32(buf)
+ return x, err
+}
diff --git a/vendor/github.com/pierrec/lz4/reader_legacy.go b/vendor/github.com/pierrec/lz4/reader_legacy.go
new file mode 100644
index 00000000000..1670a77d02a
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/reader_legacy.go
@@ -0,0 +1,207 @@
+package lz4
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+)
+
+// ReaderLegacy implements the LZ4Demo frame decoder.
+// The Header is set after the first call to Read().
+type ReaderLegacy struct {
+ Header
+ // Handler called when a block has been successfully read.
+ // It provides the number of bytes read.
+ OnBlockDone func(size int)
+
+ lastBlock bool
+ buf [8]byte // Scrap buffer.
+ pos int64 // Current position in src.
+ src io.Reader // Source.
+ zdata []byte // Compressed data.
+ data []byte // Uncompressed data.
+ idx int // Index of unread bytes into data.
+ skip int64 // Bytes to skip before next read.
+ dpos int64 // Position in dest
+}
+
+// NewReaderLegacy returns a new LZ4Demo frame decoder.
+// No access to the underlying io.Reader is performed.
+func NewReaderLegacy(src io.Reader) *ReaderLegacy {
+ r := &ReaderLegacy{src: src}
+ return r
+}
+
+// readLegacyHeader checks the frame magic number and parses the frame descriptor.
+// Skippable frames are supported even as a first frame although the LZ4
+// specification recommends that skippable frames not be used as first frames.
+func (z *ReaderLegacy) readLegacyHeader() error {
+ z.lastBlock = false
+ magic, err := z.readUint32()
+ if err != nil {
+ z.pos += 4
+ if err == io.ErrUnexpectedEOF {
+ return io.EOF
+ }
+ return err
+ }
+ if magic != frameMagicLegacy {
+ return ErrInvalid
+ }
+ z.pos += 4
+
+ // Legacy has fixed 8MB blocksizes
+ // https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame
+ bSize := blockSize4M * 2
+
+ // Allocate the compressed/uncompressed buffers.
+ // The compressed buffer cannot exceed the uncompressed one.
+ if n := 2 * bSize; cap(z.zdata) < n {
+ z.zdata = make([]byte, n, n)
+ }
+ if debugFlag {
+ debug("header block max size size=%d", bSize)
+ }
+ z.zdata = z.zdata[:bSize]
+ z.data = z.zdata[:cap(z.zdata)][bSize:]
+ z.idx = len(z.data)
+
+ z.Header.done = true
+ if debugFlag {
+ debug("header read: %v", z.Header)
+ }
+
+ return nil
+}
+
+// Read decompresses data from the underlying source into the supplied buffer.
+//
+// Since there can be multiple streams concatenated, Header values may
+// change between calls to Read(). If that is the case, no data is actually read from
+// the underlying io.Reader, to allow for potential input buffer resizing.
+func (z *ReaderLegacy) Read(buf []byte) (int, error) {
+ if debugFlag {
+ debug("Read buf len=%d", len(buf))
+ }
+ if !z.Header.done {
+ if err := z.readLegacyHeader(); err != nil {
+ return 0, err
+ }
+ if debugFlag {
+ debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d",
+ len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx)
+ }
+ }
+
+ if len(buf) == 0 {
+ return 0, nil
+ }
+
+ if z.idx == len(z.data) {
+ // No data ready for reading, process the next block.
+ if debugFlag {
+ debug(" reading block from writer %d %d", z.idx, blockSize4M*2)
+ }
+
+ // Reset uncompressed buffer
+ z.data = z.zdata[:cap(z.zdata)][len(z.zdata):]
+
+ bLen, err := z.readUint32()
+ if err != nil {
+ return 0, err
+ }
+ if debugFlag {
+ debug(" bLen %d (0x%x) offset = %d (0x%x)", bLen, bLen, z.pos, z.pos)
+ }
+ z.pos += 4
+
+ // Legacy blocks are always compressed, even when detrimental
+ if debugFlag {
+ debug(" compressed block size %d", bLen)
+ }
+
+ if int(bLen) > cap(z.data) {
+ return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
+ }
+ zdata := z.zdata[:bLen]
+ if _, err := io.ReadFull(z.src, zdata); err != nil {
+ return 0, err
+ }
+ z.pos += int64(bLen)
+
+ n, err := UncompressBlock(zdata, z.data)
+ if err != nil {
+ return 0, err
+ }
+
+ z.data = z.data[:n]
+ if z.OnBlockDone != nil {
+ z.OnBlockDone(n)
+ }
+
+ z.idx = 0
+
+ // Legacy blocks are fixed to 8MB, if we read a decompressed block smaller than this
+ // it means we've reached the end...
+ if n < blockSize4M*2 {
+ z.lastBlock = true
+ }
+ }
+
+ if z.skip > int64(len(z.data[z.idx:])) {
+ z.skip -= int64(len(z.data[z.idx:]))
+ z.dpos += int64(len(z.data[z.idx:]))
+ z.idx = len(z.data)
+ return 0, nil
+ }
+
+ z.idx += int(z.skip)
+ z.dpos += z.skip
+ z.skip = 0
+
+ n := copy(buf, z.data[z.idx:])
+ z.idx += n
+ z.dpos += int64(n)
+ if debugFlag {
+ debug("%v] copied %d bytes to input (%d:%d)", z.lastBlock, n, z.idx, len(z.data))
+ }
+ if z.lastBlock && len(z.data) == z.idx {
+ return n, io.EOF
+ }
+ return n, nil
+}
+
+// Seek implements io.Seeker, but supports seeking forward from the current
+// position only. Any other seek will return an error. Allows skipping output
+// bytes which aren't needed, which in some scenarios is faster than reading
+// and discarding them.
+// Note this may cause future calls to Read() to read 0 bytes if all of the
+// data they would have returned is skipped.
+func (z *ReaderLegacy) Seek(offset int64, whence int) (int64, error) {
+ if offset < 0 || whence != io.SeekCurrent {
+ return z.dpos + z.skip, ErrUnsupportedSeek
+ }
+ z.skip += offset
+ return z.dpos + z.skip, nil
+}
+
+// Reset discards the ReaderLegacy's state and makes it equivalent to the
+// result of its original state from NewReaderLegacy, but reading from r instead.
+// This permits reusing a ReaderLegacy rather than allocating a new one.
+func (z *ReaderLegacy) Reset(r io.Reader) {
+ z.Header = Header{}
+ z.pos = 0
+ z.src = r
+ z.zdata = z.zdata[:0]
+ z.data = z.data[:0]
+ z.idx = 0
+}
+
+// readUint32 reads a uint32 into the supplied buffer.
+// The idea is to make use of the already allocated buffers avoiding additional allocations.
+func (z *ReaderLegacy) readUint32() (uint32, error) {
+ buf := z.buf[:4]
+ _, err := io.ReadFull(z.src, buf)
+ x := binary.LittleEndian.Uint32(buf)
+ return x, err
+}
diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go
new file mode 100644
index 00000000000..f066d56305e
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/writer.go
@@ -0,0 +1,422 @@
+package lz4
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "runtime"
+
+ "github.com/pierrec/lz4/internal/xxh32"
+)
+
+// zResult contains the results of compressing a block.
+type zResult struct {
+ size uint32 // Block header
+ data []byte // Compressed data
+ checksum uint32 // Data checksum
+}
+
+// Writer implements the LZ4 frame encoder.
+type Writer struct {
+ Header
+ // Handler called when a block has been successfully written out.
+ // It provides the number of bytes written.
+ OnBlockDone func(size int)
+
+ buf [19]byte // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes
+ dst io.Writer // Destination.
+ checksum xxh32.XXHZero // Frame checksum.
+ data []byte // Data to be compressed + buffer for compressed data.
+ idx int // Index into data.
+ hashtable [winSize]int // Hash table used in CompressBlock().
+
+ // For concurrency.
+ c chan chan zResult // Channel for block compression goroutines and writer goroutine.
+ err error // Any error encountered while writing to the underlying destination.
+}
+
+// NewWriter returns a new LZ4 frame encoder.
+// No access to the underlying io.Writer is performed.
+// The supplied Header is checked at the first Write.
+// It is ok to change it before the first Write but then not until a Reset() is performed.
+func NewWriter(dst io.Writer) *Writer {
+ z := new(Writer)
+ z.Reset(dst)
+ return z
+}
+
+// WithConcurrency sets the number of concurrent goroutines used for compression.
+// A negative value sets the concurrency to GOMAXPROCS.
+func (z *Writer) WithConcurrency(n int) *Writer {
+ switch {
+ case n == 0 || n == 1:
+ z.c = nil
+ return z
+ case n < 0:
+ n = runtime.GOMAXPROCS(0)
+ }
+ z.c = make(chan chan zResult, n)
+ // Writer goroutine managing concurrent block compression goroutines.
+ go func() {
+ // Process next block compression item.
+ for c := range z.c {
+ // Read the next compressed block result.
+ // Waiting here ensures that the blocks are output in the order they were sent.
+ // The incoming channel is always closed as it indicates to the caller that
+ // the block has been processed.
+ res := <-c
+ n := len(res.data)
+ if n == 0 {
+ // Notify the block compression routine that we are done with its result.
+ // This is used when a sentinel block is sent to terminate the compression.
+ close(c)
+ return
+ }
+ // Write the block.
+ if err := z.writeUint32(res.size); err != nil && z.err == nil {
+ z.err = err
+ }
+ if _, err := z.dst.Write(res.data); err != nil && z.err == nil {
+ z.err = err
+ }
+ if z.BlockChecksum {
+ if err := z.writeUint32(res.checksum); err != nil && z.err == nil {
+ z.err = err
+ }
+ }
+ // It is now safe to release the buffer as no longer in use by any goroutine.
+ putBuffer(cap(res.data), res.data)
+ if h := z.OnBlockDone; h != nil {
+ h(n)
+ }
+ close(c)
+ }
+ }()
+ return z
+}
+
+// newBuffers instantiates new buffers whose size matches the one in Header.
+// The pooled buffer holds both the data to be compressed and its compressed form.
+func (z *Writer) newBuffers() { + bSize := z.Header.BlockMaxSize + buf := getBuffer(bSize) + z.data = buf[:bSize] // Uncompressed buffer is the first half. +} + +// freeBuffers puts the writer's buffers back to the pool. +func (z *Writer) freeBuffers() { + // Put the buffer back into the pool, if any. + putBuffer(z.Header.BlockMaxSize, z.data) + z.data = nil +} + +// writeHeader builds and writes the header (magic+header) to the underlying io.Writer. +func (z *Writer) writeHeader() error { + // Default to 4Mb if BlockMaxSize is not set. + if z.Header.BlockMaxSize == 0 { + z.Header.BlockMaxSize = blockSize4M + } + // The only option that needs to be validated. + bSize := z.Header.BlockMaxSize + if !isValidBlockSize(z.Header.BlockMaxSize) { + return fmt.Errorf("lz4: invalid block max size: %d", bSize) + } + // Allocate the compressed/uncompressed buffers. + // The compressed buffer cannot exceed the uncompressed one. + z.newBuffers() + z.idx = 0 + + // Size is optional. + buf := z.buf[:] + + // Set the fixed size data: magic number, block max size and flags. + binary.LittleEndian.PutUint32(buf[0:], frameMagic) + flg := byte(Version << 6) + flg |= 1 << 5 // No block dependency. + if z.Header.BlockChecksum { + flg |= 1 << 4 + } + if z.Header.Size > 0 { + flg |= 1 << 3 + } + if !z.Header.NoChecksum { + flg |= 1 << 2 + } + buf[4] = flg + buf[5] = blockSizeValueToIndex(z.Header.BlockMaxSize) << 4 + + // Current buffer size: magic(4) + flags(1) + block max size (1). + n := 6 + // Optional items. + if z.Header.Size > 0 { + binary.LittleEndian.PutUint64(buf[n:], z.Header.Size) + n += 8 + } + + // The header checksum includes the flags, block max size and optional Size. + buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF) + z.checksum.Reset() + + // Header ready, write it out. + if _, err := z.dst.Write(buf[0 : n+1]); err != nil { + return err + } + z.Header.done = true + if debugFlag { + debug("wrote header %v", z.Header) + } + + return nil +} + +// Write compresses data from the supplied buffer into the underlying io.Writer. +// Write does not return until the data has been written. +func (z *Writer) Write(buf []byte) (int, error) { + if !z.Header.done { + if err := z.writeHeader(); err != nil { + return 0, err + } + } + if debugFlag { + debug("input buffer len=%d index=%d", len(buf), z.idx) + } + + zn := len(z.data) + var n int + for len(buf) > 0 { + if z.idx == 0 && len(buf) >= zn { + // Avoid a copy as there is enough data for a block. + if err := z.compressBlock(buf[:zn]); err != nil { + return n, err + } + n += zn + buf = buf[zn:] + continue + } + // Accumulate the data to be compressed. + m := copy(z.data[z.idx:], buf) + n += m + z.idx += m + buf = buf[m:] + if debugFlag { + debug("%d bytes copied to buf, current index %d", n, z.idx) + } + + if z.idx < len(z.data) { + // Buffer not filled. + if debugFlag { + debug("need more data for compression") + } + return n, nil + } + + // Buffer full. + if err := z.compressBlock(z.data); err != nil { + return n, err + } + z.idx = 0 + } + + return n, nil +} + +// compressBlock compresses a block. 
+func (z *Writer) compressBlock(data []byte) error { + if !z.NoChecksum { + _, _ = z.checksum.Write(data) + } + + if z.c != nil { + c := make(chan zResult) + z.c <- c // Send now to guarantee order + + // get a buffer from the pool and copy the data over + block := getBuffer(z.Header.BlockMaxSize)[:len(data)] + copy(block, data) + + go writerCompressBlock(c, z.Header, block) + return nil + } + + zdata := z.data[z.Header.BlockMaxSize:cap(z.data)] + // The compressed block size cannot exceed the input's. + var zn int + + if level := z.Header.CompressionLevel; level != 0 { + zn, _ = CompressBlockHC(data, zdata, level) + } else { + zn, _ = CompressBlock(data, zdata, z.hashtable[:]) + } + + var bLen uint32 + if debugFlag { + debug("block compression %d => %d", len(data), zn) + } + if zn > 0 && zn < len(data) { + // Compressible and compressed size smaller than uncompressed: ok! + bLen = uint32(zn) + zdata = zdata[:zn] + } else { + // Uncompressed block. + bLen = uint32(len(data)) | compressedBlockFlag + zdata = data + } + if debugFlag { + debug("block compression to be written len=%d data len=%d", bLen, len(zdata)) + } + + // Write the block. + if err := z.writeUint32(bLen); err != nil { + return err + } + written, err := z.dst.Write(zdata) + if err != nil { + return err + } + if h := z.OnBlockDone; h != nil { + h(written) + } + + if !z.BlockChecksum { + if debugFlag { + debug("current frame checksum %x", z.checksum.Sum32()) + } + return nil + } + checksum := xxh32.ChecksumZero(zdata) + if debugFlag { + debug("block checksum %x", checksum) + defer func() { debug("current frame checksum %x", z.checksum.Sum32()) }() + } + return z.writeUint32(checksum) +} + +// Flush flushes any pending compressed data to the underlying writer. +// Flush does not return until the data has been written. +// If the underlying writer returns an error, Flush returns that error. +func (z *Writer) Flush() error { + if debugFlag { + debug("flush with index %d", z.idx) + } + if z.idx == 0 { + return nil + } + + data := getBuffer(z.Header.BlockMaxSize)[:len(z.data[:z.idx])] + copy(data, z.data[:z.idx]) + + z.idx = 0 + if z.c == nil { + return z.compressBlock(data) + } + if !z.NoChecksum { + _, _ = z.checksum.Write(data) + } + c := make(chan zResult) + z.c <- c + writerCompressBlock(c, z.Header, data) + return nil +} + +func (z *Writer) close() error { + if z.c == nil { + return nil + } + // Send a sentinel block (no data to compress) to terminate the writer main goroutine. + c := make(chan zResult) + z.c <- c + c <- zResult{} + // Wait for the main goroutine to complete. + <-c + // At this point the main goroutine has shut down or is about to return. + z.c = nil + return z.err +} + +// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. +func (z *Writer) Close() error { + if !z.Header.done { + if err := z.writeHeader(); err != nil { + return err + } + } + if err := z.Flush(); err != nil { + return err + } + if err := z.close(); err != nil { + return err + } + z.freeBuffers() + + if debugFlag { + debug("writing last empty block") + } + if err := z.writeUint32(0); err != nil { + return err + } + if z.NoChecksum { + return nil + } + checksum := z.checksum.Sum32() + if debugFlag { + debug("stream checksum %x", checksum) + } + return z.writeUint32(checksum) +} + +// Reset clears the state of the Writer z such that it is equivalent to its +// initial state from NewWriter, but instead writing to w. +// No access to the underlying io.Writer is performed. 
+func (z *Writer) Reset(w io.Writer) {
+ n := cap(z.c)
+ _ = z.close()
+ z.freeBuffers()
+ z.Header.Reset()
+ z.dst = w
+ z.checksum.Reset()
+ z.idx = 0
+ z.err = nil
+ // reset hashtable to ensure deterministic output.
+ for i := range z.hashtable {
+ z.hashtable[i] = 0
+ }
+ z.WithConcurrency(n)
+}
+
+// writeUint32 writes a uint32 to the underlying writer.
+func (z *Writer) writeUint32(x uint32) error {
+ buf := z.buf[:4]
+ binary.LittleEndian.PutUint32(buf, x)
+ _, err := z.dst.Write(buf)
+ return err
+}
+
+// writerCompressBlock compresses data into a pooled buffer and writes its result
+// out to the input channel.
+func writerCompressBlock(c chan zResult, header Header, data []byte) {
+ zdata := getBuffer(header.BlockMaxSize)
+ // The compressed block size cannot exceed the input's.
+ var zn int
+ if level := header.CompressionLevel; level != 0 {
+ zn, _ = CompressBlockHC(data, zdata, level)
+ } else {
+ var hashTable [winSize]int
+ zn, _ = CompressBlock(data, zdata, hashTable[:])
+ }
+ var res zResult
+ if zn > 0 && zn < len(data) {
+ res.size = uint32(zn)
+ res.data = zdata[:zn]
+ // release the uncompressed block since it is not used anymore
+ putBuffer(header.BlockMaxSize, data)
+ } else {
+ res.size = uint32(len(data)) | compressedBlockFlag
+ res.data = data
+ // release the compressed block since it was not used
+ putBuffer(header.BlockMaxSize, zdata)
+ }
+ if header.BlockChecksum {
+ res.checksum = xxh32.ChecksumZero(res.data)
+ }
+ c <- res
+}
diff --git a/vendor/github.com/pierrec/lz4/writer_legacy.go b/vendor/github.com/pierrec/lz4/writer_legacy.go
new file mode 100644
index 00000000000..ca8dc8c7f0c
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/writer_legacy.go
@@ -0,0 +1,182 @@
+package lz4
+
+import (
+ "encoding/binary"
+ "io"
+)
+
+// WriterLegacy implements the LZ4Demo frame encoder.
+type WriterLegacy struct {
+ Header
+ // Handler called when a block has been successfully written out.
+ // It provides the number of bytes written.
+ OnBlockDone func(size int)
+
+ dst io.Writer // Destination.
+ data []byte // Data to be compressed + buffer for compressed data.
+ idx int // Index into data.
+ hashtable [winSize]int // Hash table used in CompressBlock().
+}
+
+// NewWriterLegacy returns a new LZ4 encoder for the legacy frame format.
+// No access to the underlying io.Writer is performed.
+// The supplied Header is checked at the first Write.
+// It is ok to change it before the first Write but then not until a Reset() is performed.
+func NewWriterLegacy(dst io.Writer) *WriterLegacy {
+ z := new(WriterLegacy)
+ z.Reset(dst)
+ return z
+}
+
+// Write compresses data from the supplied buffer into the underlying io.Writer.
+// Write does not return until the data has been written.
+func (z *WriterLegacy) Write(buf []byte) (int, error) {
+ if !z.Header.done {
+ if err := z.writeHeader(); err != nil {
+ return 0, err
+ }
+ }
+ if debugFlag {
+ debug("input buffer len=%d index=%d", len(buf), z.idx)
+ }
+
+ zn := len(z.data)
+ var n int
+ for len(buf) > 0 {
+ if z.idx == 0 && len(buf) >= zn {
+ // Avoid a copy as there is enough data for a block.
+ if err := z.compressBlock(buf[:zn]); err != nil {
+ return n, err
+ }
+ n += zn
+ buf = buf[zn:]
+ continue
+ }
+ // Accumulate the data to be compressed.
+ m := copy(z.data[z.idx:], buf)
+ n += m
+ z.idx += m
+ buf = buf[m:]
+ if debugFlag {
+ debug("%d bytes copied to buf, current index %d", n, z.idx)
+ }
+
+ if z.idx < len(z.data) {
+ // Buffer not filled.
+ if debugFlag {
+ debug("need more data for compression")
+ }
+ return n, nil
+ }
+
+ // Buffer full.
+ if err := z.compressBlock(z.data); err != nil {
+ return n, err
+ }
+ z.idx = 0
+ }
+
+ return n, nil
+}
+
+// writeHeader builds and writes the header to the underlying io.Writer.
+func (z *WriterLegacy) writeHeader() error {
+ // Legacy has fixed 8MB blocksizes
+ // https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame
+ bSize := 2 * blockSize4M
+
+ buf := make([]byte, 2*bSize, 2*bSize)
+ z.data = buf[:bSize] // Uncompressed buffer is the first half.
+
+ z.idx = 0
+
+ // The header consists of a single magic number; write it out.
+ if err := binary.Write(z.dst, binary.LittleEndian, frameMagicLegacy); err != nil {
+ return err
+ }
+ z.Header.done = true
+ if debugFlag {
+ debug("wrote header %v", z.Header)
+ }
+
+ return nil
+}
+
+// compressBlock compresses a block.
+func (z *WriterLegacy) compressBlock(data []byte) error {
+ bSize := 2 * blockSize4M
+ zdata := z.data[bSize:cap(z.data)]
+ // The compressed block size cannot exceed the input's.
+ var zn int
+
+ if level := z.Header.CompressionLevel; level != 0 {
+ zn, _ = CompressBlockHC(data, zdata, level)
+ } else {
+ zn, _ = CompressBlock(data, zdata, z.hashtable[:])
+ }
+
+ if debugFlag {
+ debug("block compression %d => %d", len(data), zn)
+ }
+ zdata = zdata[:zn]
+
+ // Write the block.
+ if err := binary.Write(z.dst, binary.LittleEndian, uint32(zn)); err != nil {
+ return err
+ }
+ written, err := z.dst.Write(zdata)
+ if err != nil {
+ return err
+ }
+ if h := z.OnBlockDone; h != nil {
+ h(written)
+ }
+ return nil
+}
+
+// Flush flushes any pending compressed data to the underlying writer.
+// Flush does not return until the data has been written.
+// If the underlying writer returns an error, Flush returns that error.
+func (z *WriterLegacy) Flush() error {
+ if debugFlag {
+ debug("flush with index %d", z.idx)
+ }
+ if z.idx == 0 {
+ return nil
+ }
+
+ data := z.data[:z.idx]
+ z.idx = 0
+ return z.compressBlock(data)
+}
+
+// Close closes the WriterLegacy, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer.
+func (z *WriterLegacy) Close() error {
+ if !z.Header.done {
+ if err := z.writeHeader(); err != nil {
+ return err
+ }
+ }
+ if err := z.Flush(); err != nil {
+ return err
+ }
+
+ if debugFlag {
+ debug("writing last empty block")
+ }
+
+ return nil
+}
+
+// Reset clears the state of the WriterLegacy z such that it is equivalent to its
+// initial state from NewWriterLegacy, but instead writing to w.
+// No access to the underlying io.Writer is performed.
+func (z *WriterLegacy) Reset(w io.Writer) {
+ z.Header.Reset()
+ z.dst = w
+ z.idx = 0
+ // reset hashtable to ensure deterministic output.
+ for i := range z.hashtable {
+ z.hashtable[i] = 0
+ }
+}
diff --git a/vendor/github.com/ryanuber/go-glob/.travis.yml b/vendor/github.com/ryanuber/go-glob/.travis.yml
new file mode 100644
index 00000000000..9d1ca3c378e
--- /dev/null
+++ b/vendor/github.com/ryanuber/go-glob/.travis.yml
@@ -0,0 +1,5 @@
+language: go
+go:
+ - tip
+script:
+ - go test -v ./...
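For orientation, the lz4 frame API vendored above composes as a plain io.Writer/io.Reader pair. Below is a minimal round-trip sketch; it is illustrative only, not part of the vendored sources, and assumes the vendored import path exactly as written in this patch:

```
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/pierrec/lz4"
)

func main() {
	src := []byte("hello, lz4 frame")

	// Compress: the Header is validated at the first Write; Close writes
	// the end-of-frame marker and (unless NoChecksum is set) the frame checksum.
	var frame bytes.Buffer
	zw := lz4.NewWriter(&frame)
	if _, err := zw.Write(src); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	// Decompress: the Header is parsed on the first Read.
	zr := lz4.NewReader(&frame)
	out, err := ioutil.ReadAll(zr)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out)
}
```

Note that Close must be called on the Writer to terminate the frame; without it the Reader will fail with an unexpected EOF.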
diff --git a/vendor/github.com/ryanuber/go-glob/LICENSE b/vendor/github.com/ryanuber/go-glob/LICENSE new file mode 100644 index 00000000000..bdfbd951497 --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Ryan Uber + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/ryanuber/go-glob/README.md b/vendor/github.com/ryanuber/go-glob/README.md new file mode 100644 index 00000000000..48f7fcb05a4 --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/README.md @@ -0,0 +1,29 @@ +# String globbing in golang [![Build Status](https://travis-ci.org/ryanuber/go-glob.svg)](https://travis-ci.org/ryanuber/go-glob) + +`go-glob` is a single-function library implementing basic string glob support. + +Globs are an extremely user-friendly way of supporting string matching without +requiring knowledge of regular expressions or Go's particular regex engine. Most +people understand that if you put a `*` character somewhere in a string, it is +treated as a wildcard. Surprisingly, this functionality isn't found in Go's +standard library, except for `path.Match`, which is intended to be used while +comparing paths (not arbitrary strings), and contains specialized logic for this +use case. A better solution might be a POSIX basic (non-ERE) regular expression +engine for Go, which doesn't exist currently. + +Example +======= + +``` +package main + +import "github.com/ryanuber/go-glob" + +func main() { + glob.Glob("*World!", "Hello, World!") // true + glob.Glob("Hello,*", "Hello, World!") // true + glob.Glob("*ello,*", "Hello, World!") // true + glob.Glob("World!", "Hello, World!") // false + glob.Glob("/home/*", "/home/ryanuber/.bashrc") // true +} +``` diff --git a/vendor/github.com/ryanuber/go-glob/glob.go b/vendor/github.com/ryanuber/go-glob/glob.go new file mode 100644 index 00000000000..e67db3be183 --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/glob.go @@ -0,0 +1,56 @@ +package glob + +import "strings" + +// The character which is treated like a glob +const GLOB = "*" + +// Glob will test a string pattern, potentially containing globs, against a +// subject string. The result is a simple true/false, determining whether or +// not the glob pattern matched the subject text. 
+func Glob(pattern, subj string) bool { + // Empty pattern can only match empty subject + if pattern == "" { + return subj == pattern + } + + // If the pattern _is_ a glob, it matches everything + if pattern == GLOB { + return true + } + + parts := strings.Split(pattern, GLOB) + + if len(parts) == 1 { + // No globs in pattern, so test for equality + return subj == pattern + } + + leadingGlob := strings.HasPrefix(pattern, GLOB) + trailingGlob := strings.HasSuffix(pattern, GLOB) + end := len(parts) - 1 + + // Go over the leading parts and ensure they match. + for i := 0; i < end; i++ { + idx := strings.Index(subj, parts[i]) + + switch i { + case 0: + // Check the first section. Requires special handling. + if !leadingGlob && idx != 0 { + return false + } + default: + // Check that the middle parts match. + if idx < 0 { + return false + } + } + + // Trim evaluated text from subj as we loop over the pattern. + subj = subj[idx+len(parts[i]):] + } + + // Reached the last section. Requires special handling. + return trailingGlob || strings.HasSuffix(subj, parts[end]) +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/sans.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/sans.go new file mode 100644 index 00000000000..d237ef58ea6 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/sans.go @@ -0,0 +1,149 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cryptoutils + +import ( + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" +) + +var ( + // OIDOtherName is the OID for the OtherName SAN per RFC 5280 + OIDOtherName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 7} + // SANOID is the OID for Subject Alternative Name per RFC 5280 + SANOID = asn1.ObjectIdentifier{2, 5, 29, 17} +) + +// OtherName describes a name related to a certificate which is not in one +// of the standard name formats. RFC 5280, 4.2.1.6: +// +// OtherName ::= SEQUENCE { +// type-id OBJECT IDENTIFIER, +// value [0] EXPLICIT ANY DEFINED BY type-id } +// +// OtherName for Fulcio-issued certificates only supports UTF-8 strings as values. +type OtherName struct { + ID asn1.ObjectIdentifier + Value string `asn1:"utf8,explicit,tag:0"` +} + +// MarshalOtherNameSAN creates a Subject Alternative Name extension +// with an OtherName sequence. RFC 5280, 4.2.1.6: +// +// SubjectAltName ::= GeneralNames +// GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName +// GeneralName ::= CHOICE { +// +// otherName [0] OtherName, +// ... 
} +func MarshalOtherNameSAN(name string, critical bool) (*pkix.Extension, error) { + o := OtherName{ + ID: OIDOtherName, + Value: name, + } + bytes, err := asn1.MarshalWithParams(o, "tag:0") + if err != nil { + return nil, err + } + + sans, err := asn1.Marshal([]asn1.RawValue{{FullBytes: bytes}}) + if err != nil { + return nil, err + } + return &pkix.Extension{ + Id: SANOID, + Critical: critical, + Value: sans, + }, nil +} + +// UnmarshalOtherNameSAN extracts a UTF-8 string from the OtherName +// field in the Subject Alternative Name extension. +func UnmarshalOtherNameSAN(exts []pkix.Extension) (string, error) { + var otherNames []string + + for _, e := range exts { + if !e.Id.Equal(SANOID) { + continue + } + + var seq asn1.RawValue + rest, err := asn1.Unmarshal(e.Value, &seq) + if err != nil { + return "", err + } else if len(rest) != 0 { + return "", fmt.Errorf("trailing data after X.509 extension") + } + if !seq.IsCompound || seq.Tag != asn1.TagSequence || seq.Class != asn1.ClassUniversal { + return "", asn1.StructuralError{Msg: "bad SAN sequence"} + } + + rest = seq.Bytes + for len(rest) > 0 { + var v asn1.RawValue + rest, err = asn1.Unmarshal(rest, &v) + if err != nil { + return "", err + } + + // skip all GeneralName fields except OtherName + if v.Tag != 0 { + continue + } + + var other OtherName + if _, err := asn1.UnmarshalWithParams(v.FullBytes, &other, "tag:0"); err != nil { + return "", fmt.Errorf("could not parse requested OtherName SAN: %w", err) + } + if !other.ID.Equal(OIDOtherName) { + return "", fmt.Errorf("unexpected OID for OtherName, expected %v, got %v", OIDOtherName, other.ID) + } + otherNames = append(otherNames, other.Value) + } + } + + if len(otherNames) == 0 { + return "", errors.New("no OtherName found") + } + if len(otherNames) != 1 { + return "", errors.New("expected only one OtherName") + } + + return otherNames[0], nil +} + +// GetSubjectAlternateNames extracts all subject alternative names from +// the certificate, including email addresses, DNS, IP addresses, URIs, +// and OtherName SANs +func GetSubjectAlternateNames(cert *x509.Certificate) []string { + sans := []string{} + sans = append(sans, cert.DNSNames...) + sans = append(sans, cert.EmailAddresses...) + for _, ip := range cert.IPAddresses { + sans = append(sans, ip.String()) + } + for _, uri := range cert.URIs { + sans = append(sans, uri.String()) + } + // ignore error if there's no OtherName SAN + otherName, _ := UnmarshalOtherNameSAN(cert.Extensions) + if len(otherName) > 0 { + sans = append(sans, otherName) + } + return sans +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/client.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/client.go new file mode 100644 index 00000000000..57cb03535a9 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/client.go @@ -0,0 +1,374 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+// Package aws implements the interface with the Amazon AWS KMS service.
+package aws
+
+import (
+ "context"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "regexp"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/service/kms"
+ "github.com/aws/aws-sdk-go-v2/service/kms/types"
+ "github.com/jellydator/ttlcache/v2"
+ "github.com/sigstore/sigstore/pkg/signature"
+ sigkms "github.com/sigstore/sigstore/pkg/signature/kms"
+)
+
+func init() {
+ sigkms.AddProvider(ReferenceScheme, func(ctx context.Context, keyResourceID string, _ crypto.Hash, _ ...signature.RPCOption) (sigkms.SignerVerifier, error) {
+ return LoadSignerVerifier(ctx, keyResourceID)
+ })
+}
+
+const (
+ cacheKey = "signer"
+ // ReferenceScheme schemes for various KMS services are copied from https://github.com/google/go-cloud/tree/master/secrets
+ ReferenceScheme = "awskms://"
+)
+
+type awsClient struct {
+ client *kms.Client
+ endpoint string
+ keyID string
+ alias string
+ keyCache *ttlcache.Cache
+}
+
+var (
+ errKMSReference = errors.New("kms specification should be in the format awskms://[ENDPOINT]/[ID/ALIAS/ARN] (endpoint optional)")
+
+ // Key ID/ALIAS/ARN conforms to KMS standard documented here: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id
+ // Key format examples:
+ // Key ID: awskms:///1234abcd-12ab-34cd-56ef-1234567890ab
+ // Key ID with endpoint: awskms://localhost:4566/1234abcd-12ab-34cd-56ef-1234567890ab
+ // Key ARN: awskms:///arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+ // Key ARN with endpoint: awskms://localhost:4566/arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+ // Alias name: awskms:///alias/ExampleAlias
+ // Alias name with endpoint: awskms://localhost:4566/alias/ExampleAlias
+ // Alias ARN: awskms:///arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias
+ // Alias ARN with endpoint: awskms://localhost:4566/arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias
+ uuidRE = `m?r?k?-?[A-Fa-f0-9]{8}-?[A-Fa-f0-9]{4}-?[A-Fa-f0-9]{4}-?[A-Fa-f0-9]{4}-?[A-Fa-f0-9]{12}`
+ arnRE = `arn:(?:aws|aws-us-gov):kms:[a-z0-9-]+:\d{12}:`
+ hostRE = `([^/]*)/`
+ keyIDRE = regexp.MustCompile(`^awskms://` + hostRE + `(` + uuidRE + `)$`)
+ keyARNRE = regexp.MustCompile(`^awskms://` + hostRE + `(` + arnRE + `key/` + uuidRE + `)$`)
+ aliasNameRE = regexp.MustCompile(`^awskms://` + hostRE + `((alias/.*))$`)
+ aliasARNRE = regexp.MustCompile(`^awskms://` + hostRE + `(` + arnRE + `(alias/.*))$`)
+ allREs = []*regexp.Regexp{keyIDRE, keyARNRE, aliasNameRE, aliasARNRE}
+)
+
+// ValidReference returns a non-nil error if the reference string is invalid
+func ValidReference(ref string) error {
+ for _, re := range allREs {
+ if re.MatchString(ref) {
+ return nil
+ }
+ }
+ return errKMSReference
+}
+
+// ParseReference parses an awskms-scheme URI into its constituent parts.
+func ParseReference(resourceID string) (endpoint, keyID, alias string, err error) { + var v []string + for _, re := range allREs { + v = re.FindStringSubmatch(resourceID) + if len(v) >= 3 { + endpoint, keyID = v[1], v[2] + if len(v) == 4 { + alias = v[3] + } + return + } + } + err = fmt.Errorf("invalid awskms format %q", resourceID) + return +} + +func newAWSClient(ctx context.Context, keyResourceID string, opts ...func(*config.LoadOptions) error) (*awsClient, error) { + if err := ValidReference(keyResourceID); err != nil { + return nil, err + } + a := &awsClient{} + var err error + a.endpoint, a.keyID, a.alias, err = ParseReference(keyResourceID) + if err != nil { + return nil, err + } + + if err := a.setupClient(ctx, opts...); err != nil { + return nil, err + } + + a.keyCache = ttlcache.NewCache() + a.keyCache.SetLoaderFunction(a.keyCacheLoaderFunction) + a.keyCache.SkipTTLExtensionOnHit(true) + return a, nil +} + +func (a *awsClient) setupClient(ctx context.Context, opts ...func(*config.LoadOptions) error) (err error) { + if a.endpoint != "" { + opts = append(opts, config.WithEndpointResolverWithOptions( + aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) { + return aws.Endpoint{ + URL: "https://" + a.endpoint, + }, nil + }), + )) + } + if os.Getenv("AWS_TLS_INSECURE_SKIP_VERIFY") == "1" { + opts = append(opts, config.WithHTTPClient(&http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // nolint: gosec + }, + })) + } + + cfg, err := config.LoadDefaultConfig(ctx, opts...) + if err != nil { + return fmt.Errorf("loading AWS config: %w", err) + } + + a.client = kms.NewFromConfig(cfg) + return +} + +type cmk struct { + KeyMetadata *types.KeyMetadata + PublicKey crypto.PublicKey +} + +func (c *cmk) HashFunc() crypto.Hash { + switch c.KeyMetadata.SigningAlgorithms[0] { + case types.SigningAlgorithmSpecRsassaPssSha256, types.SigningAlgorithmSpecRsassaPkcs1V15Sha256, types.SigningAlgorithmSpecEcdsaSha256: + return crypto.SHA256 + case types.SigningAlgorithmSpecRsassaPssSha384, types.SigningAlgorithmSpecRsassaPkcs1V15Sha384, types.SigningAlgorithmSpecEcdsaSha384: + return crypto.SHA384 + case types.SigningAlgorithmSpecRsassaPssSha512, types.SigningAlgorithmSpecRsassaPkcs1V15Sha512, types.SigningAlgorithmSpecEcdsaSha512: + return crypto.SHA512 + default: + return 0 + } +} + +func (c *cmk) Verifier() (signature.Verifier, error) { + switch c.KeyMetadata.SigningAlgorithms[0] { + case types.SigningAlgorithmSpecRsassaPssSha256, types.SigningAlgorithmSpecRsassaPssSha384, types.SigningAlgorithmSpecRsassaPssSha512: + pub, ok := c.PublicKey.(*rsa.PublicKey) + if !ok { + return nil, fmt.Errorf("public key is not rsa") + } + return signature.LoadRSAPSSVerifier(pub, c.HashFunc(), nil) + case types.SigningAlgorithmSpecRsassaPkcs1V15Sha256, types.SigningAlgorithmSpecRsassaPkcs1V15Sha384, types.SigningAlgorithmSpecRsassaPkcs1V15Sha512: + pub, ok := c.PublicKey.(*rsa.PublicKey) + if !ok { + return nil, fmt.Errorf("public key is not rsa") + } + return signature.LoadRSAPKCS1v15Verifier(pub, c.HashFunc()) + case types.SigningAlgorithmSpecEcdsaSha256, types.SigningAlgorithmSpecEcdsaSha384, types.SigningAlgorithmSpecEcdsaSha512: + pub, ok := c.PublicKey.(*ecdsa.PublicKey) + if !ok { + return nil, fmt.Errorf("public key is not ecdsa") + } + return signature.LoadECDSAVerifier(pub, c.HashFunc()) + default: + return nil, fmt.Errorf("signing algorithm unsupported") + } +} + +func (a *awsClient) 
keyCacheLoaderFunction(key string) (cmk interface{}, ttl time.Duration, err error) { + return a.keyCacheLoaderFunctionWithContext(context.Background())(key) +} + +func (a *awsClient) keyCacheLoaderFunctionWithContext(ctx context.Context) ttlcache.LoaderFunction { + return func(key string) (cmk interface{}, ttl time.Duration, err error) { + cmk, err = a.fetchCMK(ctx) + ttl = time.Second * 300 + return + } +} + +func (a *awsClient) fetchCMK(ctx context.Context) (*cmk, error) { + var err error + cmk := &cmk{} + cmk.PublicKey, err = a.fetchPublicKey(ctx) + if err != nil { + return nil, err + } + cmk.KeyMetadata, err = a.fetchKeyMetadata(ctx) + if err != nil { + return nil, err + } + return cmk, nil +} + +func (a *awsClient) getHashFunc(ctx context.Context) (crypto.Hash, error) { + cmk, err := a.getCMK(ctx) + if err != nil { + return 0, err + } + return cmk.HashFunc(), nil +} + +func (a *awsClient) getCMK(ctx context.Context) (*cmk, error) { + c, err := a.keyCache.GetByLoader(cacheKey, a.keyCacheLoaderFunctionWithContext(ctx)) + if err != nil { + return nil, err + } + cmk, ok := c.(*cmk) + if !ok { + return nil, fmt.Errorf("could not parse cache value as cmk") + } + return cmk, nil +} + +func (a *awsClient) createKey(ctx context.Context, algorithm string) (crypto.PublicKey, error) { + if a.alias == "" { + return nil, errors.New("must use alias key format") + } + + // look for existing key first + out, err := a.public(ctx) + if err == nil { + return out, nil + } + + // return error if not *kms.NotFoundException + var errNotFound *types.NotFoundException + if !errors.As(err, &errNotFound) { + return nil, fmt.Errorf("looking up key: %w", err) + } + + usage := types.KeyUsageTypeSignVerify + description := "Created by Sigstore" + key, err := a.client.CreateKey(ctx, &kms.CreateKeyInput{ + CustomerMasterKeySpec: types.CustomerMasterKeySpec(algorithm), + KeyUsage: usage, + Description: &description, + }) + if err != nil { + return nil, fmt.Errorf("creating key: %w", err) + } + + _, err = a.client.CreateAlias(ctx, &kms.CreateAliasInput{ + AliasName: &a.alias, + TargetKeyId: key.KeyMetadata.KeyId, + }) + if err != nil { + return nil, fmt.Errorf("creating alias %q: %w", a.alias, err) + } + + return a.public(ctx) +} + +func (a *awsClient) verify(ctx context.Context, sig, message io.Reader, opts ...signature.VerifyOption) error { + cmk, err := a.getCMK(ctx) + if err != nil { + return err + } + verifier, err := cmk.Verifier() + if err != nil { + return err + } + return verifier.VerifySignature(sig, message, opts...) 
+} + +func (a *awsClient) verifyRemotely(ctx context.Context, sig, digest []byte) error { + cmk, err := a.getCMK(ctx) + if err != nil { + return err + } + alg := cmk.KeyMetadata.SigningAlgorithms[0] + messageType := types.MessageTypeDigest + if _, err := a.client.Verify(ctx, &kms.VerifyInput{ + KeyId: &a.keyID, + Message: digest, + MessageType: messageType, + Signature: sig, + SigningAlgorithm: alg, + }); err != nil { + return fmt.Errorf("unable to verify signature: %w", err) + } + return nil +} + +func (a *awsClient) public(ctx context.Context) (crypto.PublicKey, error) { + key, err := a.keyCache.GetByLoader(cacheKey, a.keyCacheLoaderFunctionWithContext(ctx)) + if err != nil { + return nil, err + } + cmk, ok := key.(*cmk) + if !ok { + return nil, fmt.Errorf("could not parse key as cmk") + } + return cmk.PublicKey, nil +} + +func (a *awsClient) sign(ctx context.Context, digest []byte, _ crypto.Hash) ([]byte, error) { + cmk, err := a.getCMK(ctx) + if err != nil { + return nil, err + } + alg := cmk.KeyMetadata.SigningAlgorithms[0] + + messageType := types.MessageTypeDigest + out, err := a.client.Sign(ctx, &kms.SignInput{ + KeyId: &a.keyID, + Message: digest, + MessageType: messageType, + SigningAlgorithm: alg, + }) + if err != nil { + return nil, fmt.Errorf("signing with kms: %w", err) + } + return out.Signature, nil +} + +func (a *awsClient) fetchPublicKey(ctx context.Context) (crypto.PublicKey, error) { + out, err := a.client.GetPublicKey(ctx, &kms.GetPublicKeyInput{ + KeyId: &a.keyID, + }) + if err != nil { + return nil, fmt.Errorf("getting public key: %w", err) + } + key, err := x509.ParsePKIXPublicKey(out.PublicKey) + if err != nil { + return nil, fmt.Errorf("parsing public key: %w", err) + } + return key, nil +} + +func (a *awsClient) fetchKeyMetadata(ctx context.Context) (*types.KeyMetadata, error) { + out, err := a.client.DescribeKey(ctx, &kms.DescribeKeyInput{ + KeyId: &a.keyID, + }) + if err != nil { + return nil, fmt.Errorf("getting key metadata: %w", err) + } + return out.KeyMetadata, nil +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/doc.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/doc.go new file mode 100644 index 00000000000..2dcd66eaf7f --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/doc.go @@ -0,0 +1,17 @@ +// +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package aws contains utilities related to AWS KMS. +package aws diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/signer.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/signer.go new file mode 100644 index 00000000000..abab7e61589 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/signer.go @@ -0,0 +1,243 @@ +// +// Copyright 2021 The Sigstore Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aws + +import ( + "context" + "crypto" + "fmt" + "io" + + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/sigstore/sigstore/pkg/signature" + "github.com/sigstore/sigstore/pkg/signature/options" +) + +var awsSupportedAlgorithms = []types.CustomerMasterKeySpec{ + types.CustomerMasterKeySpecRsa2048, + types.CustomerMasterKeySpecRsa3072, + types.CustomerMasterKeySpecRsa4096, + types.CustomerMasterKeySpecEccNistP256, + types.CustomerMasterKeySpecEccNistP384, + types.CustomerMasterKeySpecEccNistP521, +} + +var awsSupportedHashFuncs = []crypto.Hash{ + crypto.SHA256, + crypto.SHA384, + crypto.SHA512, +} + +// SignerVerifier is a signature.SignerVerifier that uses the AWS Key Management Service +type SignerVerifier struct { + client *awsClient +} + +// LoadSignerVerifier generates signatures using the specified key object in AWS KMS. +// +// It also can verify signatures locally using the public key. +func LoadSignerVerifier(ctx context.Context, referenceStr string, opts ...func(*config.LoadOptions) error) (*SignerVerifier, error) { + a := &SignerVerifier{} + + var err error + a.client, err = newAWSClient(ctx, referenceStr, opts...) + if err != nil { + return nil, err + } + + return a, nil +} + +// SignMessage signs the provided message using AWS KMS. If a digest is not provided, +// this method will compute the digest according to the key's default hash function. +// +// SignMessage recognizes the following Options listed in order of preference: +// +// - WithContext() +// +// - WithDigest() +// +// - WithCryptoSignerOpts() +// +// All other options are ignored if specified. +func (a *SignerVerifier) SignMessage(message io.Reader, opts ...signature.SignOption) ([]byte, error) { + var digest []byte + var err error + ctx := context.Background() + + for _, opt := range opts { + opt.ApplyContext(&ctx) + opt.ApplyDigest(&digest) + } + + var signerOpts crypto.SignerOpts + signerOpts, err = a.client.getHashFunc(ctx) + if err != nil { + return nil, fmt.Errorf("getting default hash function: %w", err) + } + for _, opt := range opts { + opt.ApplyCryptoSignerOpts(&signerOpts) + } + + hf := signerOpts.HashFunc() + + if len(digest) == 0 { + digest, hf, err = signature.ComputeDigestForSigning(message, hf, awsSupportedHashFuncs, opts...) + if err != nil { + return nil, err + } + } + + return a.client.sign(ctx, digest, hf) +} + +// PublicKey returns the public key that can be used to verify signatures created by +// this signer. If the caller wishes to specify the context to use to obtain +// the public key, pass options.WithContext(desiredCtx). +// +// All other options are ignored if specified.
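+// +// Illustrative sketch only (the alias reference is a placeholder, errors elided): +// +// sv, _ := LoadSignerVerifier(context.Background(), "awskms:///alias/my-key") +// pub, _ := sv.PublicKey(options.WithContext(ctx))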
+func (a *SignerVerifier) PublicKey(opts ...signature.PublicKeyOption) (crypto.PublicKey, error) { + ctx := context.Background() + for _, opt := range opts { + opt.ApplyContext(&ctx) + } + + return a.client.public(ctx) +} + +// VerifySignature verifies the signature for the given message. Unless provided +// in an option, the digest of the message will be computed using the hash function specified +// when the SignerVerifier was created. +// +// This function returns nil if the verification succeeded, and an error message otherwise. +// +// This function recognizes the following Options listed in order of preference: +// +// - WithContext() +// +// - WithDigest() +// +// - WithRemoteVerification() +// +// - WithCryptoSignerOpts() +// +// All other options are ignored if specified. +func (a *SignerVerifier) VerifySignature(sig, message io.Reader, opts ...signature.VerifyOption) (err error) { + ctx := context.Background() + var digest []byte + var remoteVerification bool + + for _, opt := range opts { + opt.ApplyContext(&ctx) + opt.ApplyDigest(&digest) + opt.ApplyRemoteVerification(&remoteVerification) + } + + if !remoteVerification { + return a.client.verify(ctx, sig, message, opts...) + } + + var signerOpts crypto.SignerOpts + signerOpts, err = a.client.getHashFunc(ctx) + if err != nil { + return fmt.Errorf("getting hash func: %w", err) + } + for _, opt := range opts { + opt.ApplyCryptoSignerOpts(&signerOpts) + } + hf := signerOpts.HashFunc() + + if len(digest) == 0 { + digest, _, err = signature.ComputeDigestForVerifying(message, hf, awsSupportedHashFuncs, opts...) + if err != nil { + return err + } + } + + sigBytes, err := io.ReadAll(sig) + if err != nil { + return fmt.Errorf("reading signature: %w", err) + } + return a.client.verifyRemotely(ctx, sigBytes, digest) +} + +// CreateKey attempts to create a new key in AWS KMS with the specified algorithm. +func (a *SignerVerifier) CreateKey(ctx context.Context, algorithm string) (crypto.PublicKey, error) { + return a.client.createKey(ctx, algorithm) +} + +type cryptoSignerWrapper struct { + ctx context.Context + hashFunc crypto.Hash + sv *SignerVerifier + errFunc func(error) +} + +func (c cryptoSignerWrapper) Public() crypto.PublicKey { + pk, err := c.sv.PublicKey(options.WithContext(c.ctx)) + if err != nil && c.errFunc != nil { + c.errFunc(err) + } + return pk +} + +func (c cryptoSignerWrapper) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) { + hashFunc := c.hashFunc + if opts != nil { + hashFunc = opts.HashFunc() + } + awsOptions := []signature.SignOption{ + options.WithContext(c.ctx), + options.WithDigest(digest), + options.WithCryptoSignerOpts(hashFunc), + } + + return c.sv.SignMessage(nil, awsOptions...)
+} + +// CryptoSigner returns a crypto.Signer object that uses the underlying SignerVerifier, along with a crypto.SignerOpts object +// that allows the KMS to be used in APIs that only accept the standard golang objects +func (a *SignerVerifier) CryptoSigner(ctx context.Context, errFunc func(error)) (crypto.Signer, crypto.SignerOpts, error) { + defaultHf, err := a.client.getHashFunc(ctx) + if err != nil { + return nil, nil, fmt.Errorf("getting default hash function: %w", err) + } + + csw := &cryptoSignerWrapper{ + ctx: ctx, + sv: a, + hashFunc: defaultHf, + errFunc: errFunc, + } + + return csw, defaultHf, nil +} + +// SupportedAlgorithms returns the list of algorithms supported by the AWS KMS service +func (*SignerVerifier) SupportedAlgorithms() []string { + s := make([]string, len(awsSupportedAlgorithms)) + for i := range awsSupportedAlgorithms { + s[i] = string(awsSupportedAlgorithms[i]) + } + return s +} + +// DefaultAlgorithm returns the default algorithm for the AWS KMS service +func (*SignerVerifier) DefaultAlgorithm() string { + return string(types.CustomerMasterKeySpecEccNistP256) +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/README.md b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/README.md new file mode 100644 index 00000000000..55a73d57547 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/README.md @@ -0,0 +1,55 @@ +# Azure KMS + +In order to use Azure KMS ([Key Vault](https://docs.microsoft.com/en-us/azure/key-vault/general/basic-concepts)) with the sigstore project you need to have a few things set up in Azure first. +Key creation is handled by sigstore; however, the Azure Key Vault and the required permissions have to be configured beforehand. + +## Azure Prerequisites + +- [Resource Group](https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/manage-resource-groups-portal#what-is-a-resource-group) +- [Key Vault](https://docs.microsoft.com/en-us/azure/key-vault/general/basic-concepts) +- [Key Vault permissions](https://docs.microsoft.com/en-us/azure/key-vault/general/rbac-guide) +- [Container Registry](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-intro) _(not required, but used in the examples below)_ + +## Permissions (Access Policies) + +Different commands require different Key Vault access policies. For more information check the official [Azure Docs](https://azure.microsoft.com/en-us/services/key-vault/). + +**cosign generate-key-pair** + +Required access policies (keys): `get`, `create` + +```shell +cosign generate-key-pair --kms azurekms://[Key Vault Name].vault.azure.net/[Key Name] +``` + +**cosign sign** + +Required access policies (keys): `get`, `sign` + +```shell +az acr login --name [Container Registry Name] +cosign sign --key azurekms://[Key Vault Name].vault.azure.net/[Key Name] [Container Registry Name].azurecr.io/[Image Name] +``` + +**cosign verify** + +Required access policy (keys): `verify` + +```shell +az acr login --name [Container Registry Name] +cosign verify --key azurekms://[Key Vault Name].vault.azure.net/[Key Name] [Container Registry Name].azurecr.io/[Image Name] +``` + +## Authentication + +There are multiple authentication methods supported for Azure Key Vault and by default they will be evaluated in the following order: + +1. Client credentials (FromEnvironment) +1. Client certificate (FromEnvironment) +1. Username password (FromEnvironment) +1. MSI (FromEnvironment) +1. 
CLI (FromCLI) + +You can force either `FromEnvironment` or `FromCLI` by configuring the environment variable `AZURE_AUTH_METHOD` to either `environment` or `cli`. + +For backward compatibility, if you configure `AZURE_TENANT_ID`, `AZURE_CLIENT_ID` and `AZURE_CLIENT_SECRET`, `FromEnvironment` will be used. diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/client.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/client.go new file mode 100644 index 00000000000..083e5ea6a0b --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/client.go @@ -0,0 +1,342 @@ +// +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package azure implements the interface with the Microsoft Azure KMS service +package azure + +import ( + "context" + "crypto" + "crypto/ecdsa" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "os" + "regexp" + "strings" + "time" + + "github.com/jellydator/ttlcache/v2" + jose "gopkg.in/square/go-jose.v2" + + kvauth "github.com/Azure/azure-sdk-for-go/services/keyvault/auth" + "github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/to" + "github.com/sigstore/sigstore/pkg/signature" + sigkms "github.com/sigstore/sigstore/pkg/signature/kms" +) + +func init() { + sigkms.AddProvider(ReferenceScheme, func(ctx context.Context, keyResourceID string, hashFunc crypto.Hash, opts ...signature.RPCOption) (sigkms.SignerVerifier, error) { + return LoadSignerVerifier(ctx, keyResourceID, hashFunc) + }) +} + +type kvClient interface { + CreateKey(ctx context.Context, vaultBaseURL, keyName string, parameters keyvault.KeyCreateParameters) (result keyvault.KeyBundle, err error) + GetKey(ctx context.Context, vaultBaseURL, keyName, keyVersion string) (result keyvault.KeyBundle, err error) + Sign(ctx context.Context, vaultBaseURL, keyName, keyVersion string, parameters keyvault.KeySignParameters) (result keyvault.KeyOperationResult, err error) + Verify(ctx context.Context, vaultBaseURL, keyName, keyVersion string, parameters keyvault.KeyVerifyParameters) (result keyvault.KeyVerifyResult, err error) +} + +type azureVaultClient struct { + client kvClient + keyCache *ttlcache.Cache + vaultURL string + vaultName string + keyName string +} + +var ( + errAzureReference = errors.New("kms specification should be in the format azurekms://[VAULT_NAME][VAULT_URL]/[KEY_NAME]") + + referenceRegex = regexp.MustCompile(`^azurekms://([^/]+)/([^/]+)?$`) +) + +const ( + // ReferenceScheme schemes for various KMS services are copied from https://github.com/google/go-cloud/tree/master/secrets + ReferenceScheme = "azurekms://" + cacheKey = "azure_vault_signer" +) + +// ValidReference returns a non-nil error if the reference string is invalid +func ValidReference(ref string) error { + if !referenceRegex.MatchString(ref) { + return 
errAzureReference + } + return nil +} + +func parseReference(resourceID string) (vaultURL, vaultName, keyName string, err error) { + v := referenceRegex.FindStringSubmatch(resourceID) + if len(v) != 3 { + err = fmt.Errorf("invalid azurekms format %q", resourceID) + return + } + + vaultURL = fmt.Sprintf("https://%s/", v[1]) + vaultName, keyName = strings.Split(v[1], ".")[0], v[2] + return +} + +func newAzureKMS(_ context.Context, keyResourceID string) (*azureVaultClient, error) { + if err := ValidReference(keyResourceID); err != nil { + return nil, err + } + vaultURL, vaultName, keyName, err := parseReference(keyResourceID) + if err != nil { + return nil, err + } + + client, err := getKeysClient() + if err != nil { + return nil, fmt.Errorf("new azure kms client: %w", err) + } + + azClient := &azureVaultClient{ + client: &client, + vaultURL: vaultURL, + vaultName: vaultName, + keyName: keyName, + keyCache: ttlcache.NewCache(), + } + + azClient.keyCache.SetLoaderFunction(azClient.keyCacheLoaderFunction) + azClient.keyCache.SkipTTLExtensionOnHit(true) + + return azClient, nil +} + +type authenticationMethod string + +const ( + unknownAuthenticationMethod = "unknown" + environmentAuthenticationMethod = "environment" + cliAuthenticationMethod = "cli" +) + +// getAuthenticationMethod returns an authenticationMethod to use to get an Azure Authorizer. +// If no environment variables are set, unknownAuthenticationMethod will be used. +// If the environment variable 'AZURE_AUTH_METHOD' is set to either environment or cli, use it. +// If the environment variables 'AZURE_TENANT_ID', 'AZURE_CLIENT_ID' and 'AZURE_CLIENT_SECRET' are set, use environment. +func getAuthenticationMethod() authenticationMethod { + tenantID := os.Getenv("AZURE_TENANT_ID") + clientID := os.Getenv("AZURE_CLIENT_ID") + clientSecret := os.Getenv("AZURE_CLIENT_SECRET") + authMethod := os.Getenv("AZURE_AUTH_METHOD") + + if authMethod != "" { + switch strings.ToLower(authMethod) { + case "environment": + return environmentAuthenticationMethod + case "cli": + return cliAuthenticationMethod + } + } + + if tenantID != "" && clientID != "" && clientSecret != "" { + return environmentAuthenticationMethod + } + + return unknownAuthenticationMethod +} + +// getAuthorizer takes an authenticationMethod and returns an Authorizer or an error. +// If the method is unknown, Environment will be tested and if it returns an error CLI will be tested. +// If the method is specified, the specified method will be used and no other will be tested. +// This means the following default order of methods will be used if nothing else is defined: +// 1. Client credentials (FromEnvironment) +// 2. Client certificate (FromEnvironment) +// 3. Username password (FromEnvironment) +// 4. MSI (FromEnvironment) +// 5. 
CLI (FromCLI) +func getAuthorizer(method authenticationMethod) (autorest.Authorizer, error) { + switch method { + case environmentAuthenticationMethod: + return kvauth.NewAuthorizerFromEnvironment() + case cliAuthenticationMethod: + return kvauth.NewAuthorizerFromCLI() + case unknownAuthenticationMethod: + break + default: + return nil, fmt.Errorf("you should never reach this") + } + + authorizer, err := kvauth.NewAuthorizerFromEnvironment() + if err == nil { + return authorizer, nil + } + + return kvauth.NewAuthorizerFromCLI() +} + +func getKeysClient() (keyvault.BaseClient, error) { + keyClient := keyvault.New() + + authMethod := getAuthenticationMethod() + authorizer, err := getAuthorizer(authMethod) + if err != nil { + return keyvault.BaseClient{}, err + } + + keyClient.Authorizer = authorizer + err = keyClient.AddToUserAgent("sigstore") + if err != nil { + return keyvault.BaseClient{}, err + } + + return keyClient, nil +} + +func (a *azureVaultClient) keyCacheLoaderFunction(key string) (data interface{}, ttl time.Duration, err error) { + ttl = time.Second * 300 + var pubKey crypto.PublicKey + + pubKey, err = a.fetchPublicKey(context.Background()) + if err != nil { + data = nil + return + } + + data = pubKey + return data, ttl, err +} + +func (a *azureVaultClient) fetchPublicKey(ctx context.Context) (crypto.PublicKey, error) { + keyBundle, err := a.getKey(ctx) + if err != nil { + return nil, fmt.Errorf("public key: %w", err) + } + + key := keyBundle.Key + keyType := string(key.Kty) + + // Azure Key Vault allows keys to be stored in either default Key Vault storage + // or in managed HSMs. If the key is stored in a HSM, the key type is suffixed + // with "-HSM". Since this suffix is specific to Azure Key Vault, it needs + // to be stripped from the key type before attempting to represent the key + // with a go-jose/JSONWebKey struct. 
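+ // For example (illustrative), a key type of "EC-HSM" is reduced to "EC" below before the JWK is unmarshaled.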
+ if strings.HasSuffix(keyType, "-HSM") { + split := strings.Split(keyType, "-HSM") + // since we split on the suffix, there should be only two elements + // the first element should contain the key type without the -HSM suffix + newKeyType := split[0] + key.Kty = keyvault.JSONWebKeyType(newKeyType) + } + + jwkJSON, err := json.Marshal(*key) + if err != nil { + return nil, fmt.Errorf("encoding the jsonWebKey: %w", err) + } + + jwk := jose.JSONWebKey{} + err = jwk.UnmarshalJSON(jwkJSON) + if err != nil { + return nil, fmt.Errorf("decoding the jsonWebKey: %w", err) + } + + pub, ok := jwk.Key.(*ecdsa.PublicKey) + if !ok { + return nil, fmt.Errorf("public key was not ECDSA: %#v", jwk.Key) + } + + return pub, nil +} + +func (a *azureVaultClient) getKey(ctx context.Context) (keyvault.KeyBundle, error) { + key, err := a.client.GetKey(ctx, a.vaultURL, a.keyName, "") + if err != nil { + return keyvault.KeyBundle{}, fmt.Errorf("public key: %w", err) + } + + return key, err +} + +func (a *azureVaultClient) public() (crypto.PublicKey, error) { + return a.keyCache.Get(cacheKey) +} + +func (a *azureVaultClient) createKey(ctx context.Context) (crypto.PublicKey, error) { + _, err := a.getKey(ctx) + if err == nil { + return a.public() + } + + _, err = a.client.CreateKey( + ctx, + a.vaultURL, + a.keyName, + keyvault.KeyCreateParameters{ + KeyAttributes: &keyvault.KeyAttributes{ + Enabled: to.BoolPtr(true), + }, + KeySize: to.Int32Ptr(2048), + KeyOps: &[]keyvault.JSONWebKeyOperation{ + keyvault.Sign, + keyvault.Verify, + }, + Kty: keyvault.EC, + Tags: map[string]*string{ + "use": to.StringPtr("sigstore"), + }, + }) + if err != nil { + return nil, err + } + + return a.public() +} + +func (a *azureVaultClient) sign(ctx context.Context, hash []byte) ([]byte, error) { + params := keyvault.KeySignParameters{ + Algorithm: keyvault.ES256, + Value: to.StringPtr(base64.RawURLEncoding.EncodeToString(hash)), + } + + result, err := a.client.Sign(ctx, a.vaultURL, a.keyName, "", params) + if err != nil { + return nil, fmt.Errorf("signing the payload: %w", err) + } + + decResult, err := base64.RawURLEncoding.DecodeString(*result.Result) + if err != nil { + return nil, fmt.Errorf("decoding the result: %w", err) + } + + return decResult, nil +} + +func (a *azureVaultClient) verify(ctx context.Context, signature, hash []byte) error { + params := keyvault.KeyVerifyParameters{ + Algorithm: keyvault.ES256, + Digest: to.StringPtr(base64.RawURLEncoding.EncodeToString(hash)), + Signature: to.StringPtr(base64.RawURLEncoding.EncodeToString(signature)), + } + + result, err := a.client.Verify(ctx, a.vaultURL, a.keyName, "", params) + if err != nil { + return fmt.Errorf("verify: %w", err) + } + + if !*result.Value { + return errors.New("failed vault verification") + } + + return nil +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/doc.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/doc.go new file mode 100644 index 00000000000..02fcc239b63 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/doc.go @@ -0,0 +1,17 @@ +// +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package azure contains utilities related to Microsoft Azure KMS. +package azure diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/signer.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/signer.go new file mode 100644 index 00000000000..9212b947624 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/signer.go @@ -0,0 +1,237 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package azure + +import ( + "context" + "crypto" + "errors" + "fmt" + "io" + "math/big" + + "golang.org/x/crypto/cryptobyte" + "golang.org/x/crypto/cryptobyte/asn1" + + "github.com/sigstore/sigstore/pkg/signature" + "github.com/sigstore/sigstore/pkg/signature/options" +) + +var azureSupportedHashFuncs = []crypto.Hash{ + crypto.SHA256, +} + +//nolint:revive +const ( + AlgorithmES256 = "ES256" +) + +var azureSupportedAlgorithms = []string{ + AlgorithmES256, +} + +// SignerVerifier creates and verifies digital signatures over a message using Azure KMS service +type SignerVerifier struct { + defaultCtx context.Context + hashFunc crypto.Hash + client *azureVaultClient +} + +// LoadSignerVerifier generates signatures using the specified key in Azure Key Vault and hash algorithm. +// +// It also can verify signatures locally using the public key. hashFunc must not be crypto.Hash(0). +func LoadSignerVerifier(defaultCtx context.Context, referenceStr string, hashFunc crypto.Hash) (*SignerVerifier, error) { + a := &SignerVerifier{ + defaultCtx: defaultCtx, + } + + var err error + a.client, err = newAzureKMS(defaultCtx, referenceStr) + if err != nil { + return nil, err + } + + switch hashFunc { + case 0, crypto.SHA224, crypto.SHA256, crypto.SHA384, crypto.SHA512: + a.hashFunc = hashFunc + default: + return nil, errors.New("hash function not supported by Azure Key Vault") + } + + return a, nil +} + +// SignMessage signs the provided message using Azure Key Vault. If the message is provided, +// this method will compute the digest according to the hash function specified +// when the Signer was created. +// +// SignMessage recognizes the following Options listed in order of preference: +// +// - WithContext() +// +// - WithDigest() +// +// - WithCryptoSignerOpts() +// +// All other options are ignored if specified. 
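+// +// Illustrative sketch only (vault/key names and variables are placeholders, errors elided): +// +// sv, _ := LoadSignerVerifier(ctx, "azurekms://myvault.vault.azure.net/mykey", crypto.SHA256) +// sig, _ := sv.SignMessage(bytes.NewReader(payload))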
+func (a *SignerVerifier) SignMessage(message io.Reader, opts ...signature.SignOption) ([]byte, error) { + ctx := context.Background() + var digest []byte + var signerOpts crypto.SignerOpts = a.hashFunc + + for _, opt := range opts { + opt.ApplyDigest(&digest) + opt.ApplyCryptoSignerOpts(&signerOpts) + } + + digest, _, err := signature.ComputeDigestForSigning(message, signerOpts.HashFunc(), azureSupportedHashFuncs, opts...) + if err != nil { + return nil, err + } + + rawSig, err := a.client.sign(ctx, digest) + if err != nil { + return nil, err + } + + l := len(rawSig) + r, s := &big.Int{}, &big.Int{} + r.SetBytes(rawSig[0 : l/2]) + s.SetBytes(rawSig[l/2:]) + + // Convert the concatenated r||s byte string to an ASN.1 sequence + // This logic is borrowed from https://cs.opensource.google/go/go/+/refs/tags/go1.17.3:src/crypto/ecdsa/ecdsa.go;l=121 + var b cryptobyte.Builder + b.AddASN1(asn1.SEQUENCE, func(b *cryptobyte.Builder) { + b.AddASN1BigInt(r) + b.AddASN1BigInt(s) + }) + + return b.Bytes() +} + +// VerifySignature verifies the signature for the given message. Unless provided +// in an option, the digest of the message will be computed using the hash function specified +// when the SignerVerifier was created. +// +// This function returns nil if the verification succeeded, and an error message otherwise. +// +// This function recognizes the following Options listed in order of preference: +// +// - WithDigest() +// +// All other options are ignored if specified. +func (a *SignerVerifier) VerifySignature(sig, message io.Reader, opts ...signature.VerifyOption) error { + ctx := context.Background() + var digest []byte + var signerOpts crypto.SignerOpts = a.hashFunc + for _, opt := range opts { + opt.ApplyDigest(&digest) + } + + digest, _, err := signature.ComputeDigestForVerifying(message, signerOpts.HashFunc(), azureSupportedHashFuncs, opts...) + if err != nil { + return err + } + + sigBytes, err := io.ReadAll(sig) + if err != nil { + return fmt.Errorf("reading signature: %w", err) + } + + // Convert the ASN.1 sequence to a concatenated r||s byte string + // This logic is borrowed from https://cs.opensource.google/go/go/+/refs/tags/go1.17.3:src/crypto/ecdsa/ecdsa.go;l=339 + var ( + r, s = &big.Int{}, &big.Int{} + inner cryptobyte.String + ) + input := cryptobyte.String(sigBytes) + if !input.ReadASN1(&inner, asn1.SEQUENCE) || + !input.Empty() || + !inner.ReadASN1Integer(r) || + !inner.ReadASN1Integer(s) || + !inner.Empty() { + return errors.New("parsing signature") + } + + rawSigBytes := []byte{} + rawSigBytes = append(rawSigBytes, r.Bytes()...) + rawSigBytes = append(rawSigBytes, s.Bytes()...) + return a.client.verify(ctx, rawSigBytes, digest) +} + +// PublicKey returns the public key that can be used to verify signatures created by +// this signer. All options provided in arguments to this method are ignored. +func (a *SignerVerifier) PublicKey(_ ...signature.PublicKeyOption) (crypto.PublicKey, error) { + return a.client.public() +} + +// CreateKey attempts to create a new key in Azure Key Vault. 
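+// The algorithm argument is currently ignored by the underlying Azure client, which always creates EC keys (used with the ES256 signing algorithm).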
+func (a *SignerVerifier) CreateKey(ctx context.Context, algorithm string) (crypto.PublicKey, error) { + return a.client.createKey(ctx) +} + +type cryptoSignerWrapper struct { + ctx context.Context + hashFunc crypto.Hash + sv *SignerVerifier + errFunc func(error) +} + +func (c cryptoSignerWrapper) Public() crypto.PublicKey { + pk, err := c.sv.PublicKey(options.WithContext(c.ctx)) + if err != nil && c.errFunc != nil { + c.errFunc(err) + } + return pk +} + +func (c cryptoSignerWrapper) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) { + hashFunc := c.hashFunc + if opts != nil { + hashFunc = opts.HashFunc() + } + azOptions := []signature.SignOption{ + options.WithContext(c.ctx), + options.WithDigest(digest), + options.WithCryptoSignerOpts(hashFunc), + } + + return c.sv.SignMessage(nil, azOptions...) +} + +// CryptoSigner returns a crypto.Signer object that uses the underlying SignerVerifier, along with a crypto.SignerOpts object +// that allows the KMS to be used in APIs that only accept the standard golang objects +func (a *SignerVerifier) CryptoSigner(ctx context.Context, errFunc func(error)) (crypto.Signer, crypto.SignerOpts, error) { + csw := &cryptoSignerWrapper{ + ctx: ctx, + sv: a, + hashFunc: a.hashFunc, + errFunc: errFunc, + } + + return csw, a.hashFunc, nil +} + +// SupportedAlgorithms returns the list of algorithms supported by the Azure KMS service +func (*SignerVerifier) SupportedAlgorithms() []string { + return azureSupportedAlgorithms +} + +// DefaultAlgorithm returns the default algorithm for the Azure KMS service +func (*SignerVerifier) DefaultAlgorithm() string { + return AlgorithmES256 +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/doc.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/doc.go new file mode 100644 index 00000000000..592c9ab3353 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/doc.go @@ -0,0 +1,17 @@ +// +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package kms contains utilities related to third-party KMS providers. +package kms diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/client.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/client.go new file mode 100644 index 00000000000..96792583996 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/client.go @@ -0,0 +1,418 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package gcp implements the interface with the Google Cloud KMS service +package gcp + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "errors" + "fmt" + "hash/crc32" + "io" + "log" + "regexp" + "time" + + gcpkms "cloud.google.com/go/kms/apiv1" + "cloud.google.com/go/kms/apiv1/kmspb" + "google.golang.org/api/option" + "google.golang.org/protobuf/types/known/wrapperspb" + + "github.com/jellydator/ttlcache/v2" + "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/signature" + sigkms "github.com/sigstore/sigstore/pkg/signature/kms" + "github.com/sigstore/sigstore/pkg/signature/options" +) + +func init() { + sigkms.AddProvider(ReferenceScheme, func(ctx context.Context, keyResourceID string, _ crypto.Hash, opts ...signature.RPCOption) (sigkms.SignerVerifier, error) { + return LoadSignerVerifier(ctx, keyResourceID) + }) +} + +//nolint:revive +const ( + AlgorithmECDSAP256SHA256 = "ecdsa-p256-sha256" + AlgorithmECDSAP384SHA384 = "ecdsa-p384-sha384" + AlgorithmRSAPKCS1v152048SHA256 = "rsa-pkcs1v15-2048-sha256" + AlgorithmRSAPKCS1v153072SHA256 = "rsa-pkcs1v15-3072-sha256" + AlgorithmRSAPKCS1v154096SHA256 = "rsa-pkcs1v15-4096-sha256" + AlgorithmRSAPKCS1v154096SHA512 = "rsa-pkcs1v15-4096-sha512" + AlgorithmRSAPSS2048SHA256 = "rsa-pss-2048-sha256" + AlgorithmRSAPSS3072SHA256 = "rsa-pss-3072-sha256" + AlgorithmRSAPSS4096SHA256 = "rsa-pss-4096-sha256" + AlgorithmRSAPSS4096SHA512 = "rsa-pss-4096-sha512" +) + +var algorithmMap = map[string]kmspb.CryptoKeyVersion_CryptoKeyVersionAlgorithm{ + AlgorithmECDSAP256SHA256: kmspb.CryptoKeyVersion_EC_SIGN_P256_SHA256, + AlgorithmECDSAP384SHA384: kmspb.CryptoKeyVersion_EC_SIGN_P384_SHA384, + AlgorithmRSAPKCS1v152048SHA256: kmspb.CryptoKeyVersion_RSA_SIGN_PKCS1_2048_SHA256, + AlgorithmRSAPKCS1v153072SHA256: kmspb.CryptoKeyVersion_RSA_SIGN_PKCS1_3072_SHA256, + AlgorithmRSAPKCS1v154096SHA256: kmspb.CryptoKeyVersion_RSA_SIGN_PKCS1_4096_SHA256, + AlgorithmRSAPKCS1v154096SHA512: kmspb.CryptoKeyVersion_RSA_SIGN_PKCS1_4096_SHA512, + AlgorithmRSAPSS2048SHA256: kmspb.CryptoKeyVersion_RSA_SIGN_PSS_2048_SHA256, + AlgorithmRSAPSS3072SHA256: kmspb.CryptoKeyVersion_RSA_SIGN_PSS_3072_SHA256, + AlgorithmRSAPSS4096SHA256: kmspb.CryptoKeyVersion_RSA_SIGN_PSS_4096_SHA256, + AlgorithmRSAPSS4096SHA512: kmspb.CryptoKeyVersion_RSA_SIGN_PSS_4096_SHA512, +} + +type gcpClient struct { + defaultCtx context.Context + refString string + projectID string + locationID string + keyRing string + keyName string + version string + kvCache *ttlcache.Cache + kmsClient *gcpkms.KeyManagementClient +} + +func newGCPClient(ctx context.Context, refStr string, opts ...option.ClientOption) (*gcpClient, error) { + if err := ValidReference(refStr); err != nil { + return nil, err + } + + if ctx == nil { + ctx = context.Background() + } + + g := &gcpClient{ + defaultCtx: ctx, + refString: refStr, + kvCache: ttlcache.NewCache(), + } + var err error + g.projectID, g.locationID, g.keyRing, g.keyName, g.version, err = parseReference(refStr) + if err != nil { + return nil, err + } + + g.kmsClient, err = gcpkms.NewKeyManagementClient(ctx, opts...)
+ if err != nil { + return nil, fmt.Errorf("new gcp kms client: %w", err) + } + + g.kvCache.SetLoaderFunction(g.kvCacheLoaderFunction) + g.kvCache.SkipTTLExtensionOnHit(true) + // prime the cache + _, err = g.kvCache.Get(cacheKey) + if err != nil { + return nil, fmt.Errorf("initializing key version from GCP KMS: %w", err) + } + return g, nil +} + +var ( + errKMSReference = errors.New("kms specification should be in the format gcpkms://projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEY_RING]/cryptoKeys/[KEY]/cryptoKeyVersions/[VERSION]") + + re = regexp.MustCompile(`^gcpkms://projects/([^/]+)/locations/([^/]+)/keyRings/([^/]+)/cryptoKeys/([^/]+)(?:/(?:cryptoKeyVersions|versions)/([^/]+))?$`) +) + +// ReferenceScheme schemes for various KMS services are copied from https://github.com/google/go-cloud/tree/master/secrets +const ReferenceScheme = "gcpkms://" + +// ValidReference returns a non-nil error if the reference string is invalid +func ValidReference(ref string) error { + if !re.MatchString(ref) { + return errKMSReference + } + return nil +} + +func parseReference(resourceID string) (projectID, locationID, keyRing, keyName, version string, err error) { + v := re.FindStringSubmatch(resourceID) + if len(v) != 6 { + err = fmt.Errorf("invalid gcpkms format %q", resourceID) + return + } + projectID, locationID, keyRing, keyName, version = v[1], v[2], v[3], v[4], v[5] + return +} + +type cryptoKeyVersion struct { + CryptoKeyVersion *kmspb.CryptoKeyVersion + Verifier signature.Verifier + HashFunc crypto.Hash +} + +// use a consistent key for cache lookups +const cacheKey = "crypto_key_version" + +func (g *gcpClient) kvCacheLoaderFunction(key string) (data interface{}, ttl time.Duration, err error) { + // if we're given an explicit version, cache this value forever + if g.version != "" { + ttl = time.Second * 0 + } else { + ttl = time.Second * 300 + } + data, err = g.keyVersionName(context.Background()) + + return +} + +// keyVersionName returns the first key version found for a key in KMS +func (g *gcpClient) keyVersionName(ctx context.Context) (*cryptoKeyVersion, error) { + parent := fmt.Sprintf("projects/%s/locations/%s/keyRings/%s/cryptoKeys/%s", g.projectID, g.locationID, g.keyRing, g.keyName) + + parentReq := &kmspb.GetCryptoKeyRequest{ + Name: parent, + } + key, err := g.kmsClient.GetCryptoKey(ctx, parentReq) + if err != nil { + return nil, err + } + if key.Purpose != kmspb.CryptoKey_ASYMMETRIC_SIGN { + return nil, errors.New("specified key cannot be used to sign") + } + + // if g.version was specified, use it explicitly + var kv *kmspb.CryptoKeyVersion + if g.version != "" { + req := &kmspb.GetCryptoKeyVersionRequest{ + Name: parent + fmt.Sprintf("/cryptoKeyVersions/%s", g.version), + } + kv, err = g.kmsClient.GetCryptoKeyVersion(ctx, req) + if err != nil { + return nil, err + } + } else { + req := &kmspb.ListCryptoKeyVersionsRequest{ + Parent: parent, + Filter: "state=ENABLED", + OrderBy: "name desc", + } + iterator := g.kmsClient.ListCryptoKeyVersions(ctx, req) + + // pick the key version that is enabled with the greatest version value + kv, err = iterator.Next() + if err != nil { + return nil, fmt.Errorf("unable to find an enabled key version in GCP KMS: %w", err) + } + } + // kv is keyVersion to use + crv := cryptoKeyVersion{ + CryptoKeyVersion: kv, + } + + pubKey, err := g.fetchPublicKey(ctx, kv.Name) + if err != nil { + return nil, fmt.Errorf("unable to fetch public key while creating signer: %w", err) + } + + // crv.Verifier is set here to enable storing the public 
key & hash algorithm together, + // as well as using the in memory Verifier to perform the verify operations. + switch kv.Algorithm { + case kmspb.CryptoKeyVersion_EC_SIGN_P256_SHA256: + crv.Verifier, err = signature.LoadECDSAVerifier(pubKey.(*ecdsa.PublicKey), crypto.SHA256) + crv.HashFunc = crypto.SHA256 + case kmspb.CryptoKeyVersion_EC_SIGN_P384_SHA384: + crv.Verifier, err = signature.LoadECDSAVerifier(pubKey.(*ecdsa.PublicKey), crypto.SHA384) + crv.HashFunc = crypto.SHA384 + case kmspb.CryptoKeyVersion_RSA_SIGN_PKCS1_2048_SHA256, + kmspb.CryptoKeyVersion_RSA_SIGN_PKCS1_3072_SHA256, + kmspb.CryptoKeyVersion_RSA_SIGN_PKCS1_4096_SHA256: + crv.Verifier, err = signature.LoadRSAPKCS1v15Verifier(pubKey.(*rsa.PublicKey), crypto.SHA256) + crv.HashFunc = crypto.SHA256 + case kmspb.CryptoKeyVersion_RSA_SIGN_PKCS1_4096_SHA512: + crv.Verifier, err = signature.LoadRSAPKCS1v15Verifier(pubKey.(*rsa.PublicKey), crypto.SHA512) + crv.HashFunc = crypto.SHA512 + case kmspb.CryptoKeyVersion_RSA_SIGN_PSS_2048_SHA256, + kmspb.CryptoKeyVersion_RSA_SIGN_PSS_3072_SHA256, + kmspb.CryptoKeyVersion_RSA_SIGN_PSS_4096_SHA256: + crv.Verifier, err = signature.LoadRSAPSSVerifier(pubKey.(*rsa.PublicKey), crypto.SHA256, nil) + crv.HashFunc = crypto.SHA256 + case kmspb.CryptoKeyVersion_RSA_SIGN_PSS_4096_SHA512: + crv.Verifier, err = signature.LoadRSAPSSVerifier(pubKey.(*rsa.PublicKey), crypto.SHA512, nil) + crv.HashFunc = crypto.SHA512 + default: + return nil, errors.New("unknown algorithm specified by KMS") + } + if err != nil { + return nil, fmt.Errorf("initializing internal verifier: %w", err) + } + return &crv, nil +} + +func (g *gcpClient) fetchPublicKey(ctx context.Context, name string) (crypto.PublicKey, error) { + // Build the request. + pkreq := &kmspb.GetPublicKeyRequest{Name: name} + // Call the API. + pk, err := g.kmsClient.GetPublicKey(ctx, pkreq) + if err != nil { + return nil, fmt.Errorf("public key: %w", err) + } + return cryptoutils.UnmarshalPEMToPublicKey([]byte(pk.GetPem())) +} + +func (g *gcpClient) getHashFunc() (crypto.Hash, error) { + ckv, err := g.getCKV() + if err != nil { + return 0, err + } + return ckv.HashFunc, nil +} + +// getCKV gets the latest CryptoKeyVersion from the client's cache, which may trigger an actual +// call to GCP if the existing entry in the cache has expired. 
+func (g *gcpClient) getCKV() (*cryptoKeyVersion, error) { + // we get once and use consistently to ensure the cache value doesn't change underneath us + kmsVersionInt, err := g.kvCache.Get(cacheKey) + if err != nil { + return nil, err + } + + kv, ok := kmsVersionInt.(*cryptoKeyVersion) + if !ok { + return nil, fmt.Errorf("could not parse kms version cache value as CryptoKeyVersion") + } + + return kv, nil +} + +func (g *gcpClient) sign(ctx context.Context, digest []byte, alg crypto.Hash, crc uint32) ([]byte, error) { + ckv, err := g.getCKV() + if err != nil { + return nil, err + } + + gcpSignReq := kmspb.AsymmetricSignRequest{ + Name: ckv.CryptoKeyVersion.Name, + Digest: &kmspb.Digest{}, + } + + if crc != 0 { + gcpSignReq.DigestCrc32C = wrapperspb.Int64(int64(crc)) + } + + switch alg { + case crypto.SHA256: + gcpSignReq.Digest.Digest = &kmspb.Digest_Sha256{ + Sha256: digest, + } + case crypto.SHA384: + gcpSignReq.Digest.Digest = &kmspb.Digest_Sha384{ + Sha384: digest, + } + case crypto.SHA512: + gcpSignReq.Digest.Digest = &kmspb.Digest_Sha512{ + Sha512: digest, + } + default: + return nil, errors.New("unsupported hash function") + } + + resp, err := g.kmsClient.AsymmetricSign(ctx, &gcpSignReq) + if err != nil { + return nil, fmt.Errorf("calling GCP AsymmetricSign: %w", err) + } + + // Optional, but recommended: perform integrity verification on result. + // For more details on ensuring E2E in-transit integrity to and from Cloud KMS visit: + // https://cloud.google.com/kms/docs/data-integrity-guidelines + if crc != 0 && !resp.VerifiedDigestCrc32C { + return nil, fmt.Errorf("AsymmetricSign: request corrupted in-transit") + } + if int64(crc32.Checksum(resp.Signature, crc32.MakeTable(crc32.Castagnoli))) != resp.SignatureCrc32C.Value { + return nil, fmt.Errorf("AsymmetricSign: response corrupted in-transit") + } + + return resp.Signature, nil +} + +func (g *gcpClient) public(ctx context.Context) (crypto.PublicKey, error) { + crv, err := g.getCKV() + if err != nil { + return nil, fmt.Errorf("transient error getting info from KMS: %w", err) + } + return crv.Verifier.PublicKey(options.WithContext(ctx)) +} + +func (g *gcpClient) verify(sig, message io.Reader, opts ...signature.VerifyOption) error { + crv, err := g.getCKV() + if err != nil { + return fmt.Errorf("transient error getting info from KMS: %w", err) + } + if err := crv.Verifier.VerifySignature(sig, message, opts...); err != nil { + // key could have been rotated, clear cache and try again if we're not pinned to a version + if g.version == "" { + _ = g.kvCache.Remove(cacheKey) + crv, err = g.getCKV() + if err != nil { + return fmt.Errorf("transient error getting info from KMS: %w", err) + } + return crv.Verifier.VerifySignature(sig, message, opts...) 
+ } + return fmt.Errorf("failed to verify for fixed version: %w", err) + } + return nil +} + +func (g *gcpClient) createKey(ctx context.Context, algorithm string) (crypto.PublicKey, error) { + if err := g.createKeyRing(ctx); err != nil { + return nil, fmt.Errorf("creating key ring: %w", err) + } + + getKeyRequest := &kmspb.GetCryptoKeyRequest{ + Name: fmt.Sprintf("projects/%s/locations/%s/keyRings/%s/cryptoKeys/%s", g.projectID, g.locationID, g.keyRing, g.keyName), + } + if _, err := g.kmsClient.GetCryptoKey(ctx, getKeyRequest); err == nil { + return g.public(ctx) + } + + if _, ok := algorithmMap[algorithm]; !ok { + return nil, errors.New("unknown algorithm requested") + } + + createKeyRequest := &kmspb.CreateCryptoKeyRequest{ + Parent: fmt.Sprintf("projects/%s/locations/%s/keyRings/%s", g.projectID, g.locationID, g.keyRing), + CryptoKeyId: g.keyName, + CryptoKey: &kmspb.CryptoKey{ + Purpose: kmspb.CryptoKey_ASYMMETRIC_SIGN, + VersionTemplate: &kmspb.CryptoKeyVersionTemplate{ + Algorithm: algorithmMap[algorithm], + }, + }, + } + if _, err := g.kmsClient.CreateCryptoKey(ctx, createKeyRequest); err != nil { + return nil, fmt.Errorf("creating crypto key: %w", err) + } + return g.public(ctx) +} + +func (g *gcpClient) createKeyRing(ctx context.Context) error { + getKeyRingRequest := &kmspb.GetKeyRingRequest{ + Name: fmt.Sprintf("projects/%s/locations/%s/keyRings/%s", g.projectID, g.locationID, g.keyRing), + } + if result, err := g.kmsClient.GetKeyRing(ctx, getKeyRingRequest); err == nil { + log.Printf("Key ring %s already exists in GCP KMS, moving on to creating key.\n", result.GetName()) + // key ring already exists, no need to create + return nil + } + // try to create key ring + createKeyRingRequest := &kmspb.CreateKeyRingRequest{ + Parent: fmt.Sprintf("projects/%s/locations/%s", g.projectID, g.locationID), + KeyRingId: g.keyRing, + } + result, err := g.kmsClient.CreateKeyRing(ctx, createKeyRingRequest) + log.Printf("Created key ring %s in GCP KMS.\n", result.GetName()) + return err +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/doc.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/doc.go new file mode 100644 index 00000000000..3ecf441189a --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/doc.go @@ -0,0 +1,17 @@ +// +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package gcp contains utilities related to Google Cloud Platform KMS. +package gcp diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/signer.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/signer.go new file mode 100644 index 00000000000..445f9d9a558 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/signer.go @@ -0,0 +1,195 @@ +// +// Copyright 2021 The Sigstore Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gcp + +import ( + "context" + "crypto" + "fmt" + "hash/crc32" + "io" + + "github.com/sigstore/sigstore/pkg/signature" + "github.com/sigstore/sigstore/pkg/signature/options" + "google.golang.org/api/option" +) + +var gcpSupportedHashFuncs = []crypto.Hash{ + crypto.SHA256, + crypto.SHA512, + crypto.SHA384, +} + +// SignerVerifier creates and verifies digital signatures over a message using GCP KMS service +type SignerVerifier struct { + defaultCtx context.Context + client *gcpClient +} + +// LoadSignerVerifier generates signatures using the specified key object in GCP KMS. +// +// It also can verify signatures locally using the public key. +func LoadSignerVerifier(defaultCtx context.Context, referenceStr string, opts ...option.ClientOption) (*SignerVerifier, error) { + g := &SignerVerifier{ + defaultCtx: defaultCtx, + } + + var err error + g.client, err = newGCPClient(defaultCtx, referenceStr, opts...) + if err != nil { + return nil, err + } + + return g, nil +} + +// SignMessage signs the provided message using GCP KMS. If a digest is not provided, +// this method will compute the digest according to the key's default hash function. +// +// SignMessage recognizes the following Options listed in order of preference: +// +// - WithContext() +// +// - WithDigest() +// +// - WithCryptoSignerOpts() +// +// All other options are ignored if specified. +func (g *SignerVerifier) SignMessage(message io.Reader, opts ...signature.SignOption) ([]byte, error) { + ctx := context.Background() + var digest []byte + var signerOpts crypto.SignerOpts + var err error + + signerOpts, err = g.client.getHashFunc() + if err != nil { + return nil, fmt.Errorf("getting default hash function: %w", err) + } + + for _, opt := range opts { + opt.ApplyContext(&ctx) + opt.ApplyDigest(&digest) + opt.ApplyCryptoSignerOpts(&signerOpts) + } + + digest, hf, err := signature.ComputeDigestForSigning(message, signerOpts.HashFunc(), gcpSupportedHashFuncs, opts...) + if err != nil { + return nil, err + } + + crc32cHasher := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + _, err = crc32cHasher.Write(digest) + if err != nil { + return nil, err + } + + return g.client.sign(ctx, digest, hf, crc32cHasher.Sum32()) +} + +// PublicKey returns the public key that can be used to verify signatures created by +// this signer. If the caller wishes to specify the context to use to obtain +// the public key, pass options.WithContext(desiredCtx). +// +// All other options are ignored if specified. +func (g *SignerVerifier) PublicKey(opts ...signature.PublicKeyOption) (crypto.PublicKey, error) { + ctx := context.Background() + for _, opt := range opts { + opt.ApplyContext(&ctx) + } + + return g.client.public(ctx) +} + +// VerifySignature verifies the signature for the given message. 
Unless provided +// in an option, the digest of the message will be computed using the hash function specified +// when the SignerVerifier was created. +// +// This function returns nil if the verification succeeded, and an error message otherwise. +// +// This function recognizes the following Options listed in order of preference: +// +// - WithDigest() +// +// All other options are ignored if specified. +func (g *SignerVerifier) VerifySignature(signature, message io.Reader, opts ...signature.VerifyOption) error { + return g.client.verify(signature, message, opts...) +} + +// CreateKey attempts to create a new key in GCP KMS with the specified algorithm. +func (g *SignerVerifier) CreateKey(ctx context.Context, algorithm string) (crypto.PublicKey, error) { + return g.client.createKey(ctx, algorithm) +} + +type cryptoSignerWrapper struct { + ctx context.Context + hashFunc crypto.Hash + sv *SignerVerifier + errFunc func(error) +} + +func (c cryptoSignerWrapper) Public() crypto.PublicKey { + pk, err := c.sv.PublicKey(options.WithContext(c.ctx)) + if err != nil && c.errFunc != nil { + c.errFunc(err) + } + return pk +} + +func (c cryptoSignerWrapper) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) { + hashFunc := c.hashFunc + if opts != nil { + hashFunc = opts.HashFunc() + } + gcpOptions := []signature.SignOption{ + options.WithContext(c.ctx), + options.WithDigest(digest), + options.WithCryptoSignerOpts(hashFunc), + } + + return c.sv.SignMessage(nil, gcpOptions...) +} + +// CryptoSigner returns a crypto.Signer object that uses the underlying SignerVerifier, along with a crypto.SignerOpts object +// that allows the KMS to be used in APIs that only accept the standard golang objects +func (g *SignerVerifier) CryptoSigner(ctx context.Context, errFunc func(error)) (crypto.Signer, crypto.SignerOpts, error) { + defaultHf, err := g.client.getHashFunc() + if err != nil { + return nil, nil, fmt.Errorf("getting default hash function: %w", err) + } + + csw := &cryptoSignerWrapper{ + ctx: ctx, + sv: g, + hashFunc: defaultHf, + errFunc: errFunc, + } + + return csw, defaultHf, nil +} + +// SupportedAlgorithms returns the list of algorithms supported by the GCP KMS service +func (g *SignerVerifier) SupportedAlgorithms() (result []string) { + for k := range algorithmMap { + result = append(result, k) + } + return +} + +// DefaultAlgorithm returns the default algorithm for the GCP KMS service +func (g *SignerVerifier) DefaultAlgorithm() string { + return AlgorithmECDSAP256SHA256 +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/client.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/client.go new file mode 100644 index 00000000000..8f54acda3af --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/client.go @@ -0,0 +1,381 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package hashivault implements the interface with the Hashicorp Vault KMS service +package hashivault + +import ( + "context" + "crypto" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "log" + "os" + "path/filepath" + "regexp" + "strconv" + "time" + + vault "github.com/hashicorp/vault/api" + "github.com/jellydator/ttlcache/v2" + "github.com/mitchellh/go-homedir" + "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/signature" + sigkms "github.com/sigstore/sigstore/pkg/signature/kms" +) + +func init() { + sigkms.AddProvider(ReferenceScheme, func(_ context.Context, keyResourceID string, hashFunc crypto.Hash, opts ...signature.RPCOption) (sigkms.SignerVerifier, error) { + return LoadSignerVerifier(keyResourceID, hashFunc, opts...) + }) +} + +type hashivaultClient struct { + client *vault.Client + keyPath string + transitSecretEnginePath string + keyCache *ttlcache.Cache + keyVersion uint64 +} + +var ( + errReference = errors.New("kms specification should be in the format hashivault://<key>") + referenceRegex = regexp.MustCompile(`^hashivault://(?P<path>\w(([\w-.]+)?\w)?)$`) + prefixRegex = regexp.MustCompile("^vault:v[0-9]+:") +) + +const ( + vaultV1DataPrefix = "vault:v1:" + + // use a consistent key for cache lookups + cacheKey = "signer" + + // ReferenceScheme schemes for various KMS services are copied from https://github.com/google/go-cloud/tree/master/secrets + ReferenceScheme = "hashivault://" +) + +// ValidReference returns a non-nil error if the reference string is invalid +func ValidReference(ref string) error { + if !referenceRegex.MatchString(ref) { + return errReference + } + return nil +} + +func parseReference(resourceID string) (keyPath string, err error) { + i := referenceRegex.SubexpIndex("path") + v := referenceRegex.FindStringSubmatch(resourceID) + if len(v) < i+1 { + err = fmt.Errorf("invalid vault format %q", resourceID) + return + } + keyPath = v[i] + return +} + +func newHashivaultClient(address, token, transitSecretEnginePath, keyResourceID string, keyVersion uint64) (*hashivaultClient, error) { + if err := ValidReference(keyResourceID); err != nil { + return nil, err + } + + keyPath, err := parseReference(keyResourceID) + if err != nil { + return nil, err + } + + if address == "" { + address = os.Getenv("VAULT_ADDR") + } + if address == "" { + return nil, errors.New("VAULT_ADDR is not set") + } + + client, err := vault.NewClient(&vault.Config{ + Address: address, + }) + if err != nil { + return nil, fmt.Errorf("new vault client: %w", err) + } + + if token == "" { + token = os.Getenv("VAULT_TOKEN") + } + if token == "" { + log.Printf("VAULT_TOKEN is not set, trying to read token from file at path ~/.vault-token") + homeDir, err := homedir.Dir() + if err != nil { + return nil, fmt.Errorf("get home directory: %w", err) + } + + tokenFromFile, err := os.ReadFile(filepath.Join(homeDir, ".vault-token")) + if err != nil { + return nil, fmt.Errorf("read .vault-token file: %w", err) + } + + token = string(tokenFromFile) + } + client.SetToken(token) + + if transitSecretEnginePath == "" { + transitSecretEnginePath = os.Getenv("TRANSIT_SECRET_ENGINE_PATH") + } + if transitSecretEnginePath == "" { + transitSecretEnginePath = "transit" + } + + hvClient := &hashivaultClient{ + client: client, + keyPath: keyPath, + transitSecretEnginePath: transitSecretEnginePath, + keyCache: ttlcache.NewCache(), + keyVersion: keyVersion, + } + 
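+ // The cache loader below re-fetches the public key once its 300s TTL lapses; skipping TTL extension on hits ensures rotated keys are eventually picked up.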
hvClient.keyCache.SetLoaderFunction(hvClient.keyCacheLoaderFunction) + hvClient.keyCache.SkipTTLExtensionOnHit(true) + + return hvClient, nil +} + +func oidcLogin(_ context.Context, address, path, role, token string) (string, error) { + if address == "" { + address = os.Getenv("VAULT_ADDR") + } + if address == "" { + return "", errors.New("VAULT_ADDR is not set") + } + if path == "" { + path = "jwt" + } + + client, err := vault.NewClient(&vault.Config{ + Address: address, + }) + if err != nil { + return "", fmt.Errorf("new vault client: %w", err) + } + + loginData := map[string]interface{}{ + "role": role, + "jwt": token, + } + fullpath := fmt.Sprintf("auth/%s/login", path) + resp, err := client.Logical().Write(fullpath, loginData) + if err != nil { + return "", fmt.Errorf("vault oidc login: %w", err) + } + return resp.TokenID() +} + +func (h *hashivaultClient) keyCacheLoaderFunction(key string) (data interface{}, ttl time.Duration, err error) { + ttl = time.Second * 300 + var pubKey crypto.PublicKey + pubKey, err = h.fetchPublicKey(context.Background()) + if err != nil { + data = nil + return + } + data = pubKey + return data, ttl, err +} + +func (h *hashivaultClient) fetchPublicKey(_ context.Context) (crypto.PublicKey, error) { + client := h.client.Logical() + + path := fmt.Sprintf("/%s/keys/%s", h.transitSecretEnginePath, h.keyPath) + + keyResult, err := client.Read(path) + if err != nil { + return nil, fmt.Errorf("public key: %w", err) + } + + if keyResult == nil { + return nil, fmt.Errorf("could not read data from transit key path: %s", path) + } + + keysData, hasKeys := keyResult.Data["keys"] + latestVersion, hasVersion := keyResult.Data["latest_version"] + if !hasKeys || !hasVersion { + return nil, errors.New("failed to read transit key keys: corrupted response") + } + + keys, ok := keysData.(map[string]interface{}) + if !ok { + return nil, errors.New("failed to read transit key keys: Invalid keys map") + } + + keyVersion, ok := latestVersion.(json.Number) + if !ok { + return nil, fmt.Errorf("format of 'latest_version' is not json.Number") + } + + keyData, ok := keys[string(keyVersion)] + if !ok { + return nil, errors.New("failed to read transit key keys: corrupted response") + } + + keyMap, ok := keyData.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("could not parse transit key keys data as map[string]interface{}") + } + + publicKeyPem, ok := keyMap["public_key"] + if !ok { + return nil, errors.New("failed to read transit key keys: corrupted response") + } + + strPublicKeyPem, ok := publicKeyPem.(string) + if !ok { + return nil, fmt.Errorf("could not parse public key pem as string") + } + + return cryptoutils.UnmarshalPEMToPublicKey([]byte(strPublicKeyPem)) +} + +func (h *hashivaultClient) public() (crypto.PublicKey, error) { + return h.keyCache.Get(cacheKey) +} + +func (h hashivaultClient) sign(digest []byte, alg crypto.Hash, opts ...signature.SignOption) ([]byte, error) { + client := h.client.Logical() + + keyVersion := fmt.Sprintf("%d", h.keyVersion) + var keyVersionUsedPtr *string + for _, opt := range opts { + opt.ApplyKeyVersion(&keyVersion) + opt.ApplyKeyVersionUsed(&keyVersionUsedPtr) + } + + if keyVersion != "" { + if _, err := strconv.ParseUint(keyVersion, 10, 64); err != nil { + return nil, fmt.Errorf("parsing requested key version: %w", err) + } + } + + signResult, err := client.Write(fmt.Sprintf("/%s/sign/%s%s", h.transitSecretEnginePath, h.keyPath, hashString(alg)), map[string]interface{}{ + "input": base64.StdEncoding.Strict().EncodeToString(digest), + 
"prehashed": alg != crypto.Hash(0), + "key_version": keyVersion, + }) + if err != nil { + return nil, fmt.Errorf("transit: failed to sign payload: %w", err) + } + + encodedSignature, ok := signResult.Data["signature"] + if !ok { + return nil, errors.New("transit: response corrupted in-transit") + } + + return vaultDecode(encodedSignature, keyVersionUsedPtr) +} + +func (h hashivaultClient) verify(sig, digest []byte, alg crypto.Hash, opts ...signature.VerifyOption) error { + client := h.client.Logical() + encodedSig := base64.StdEncoding.EncodeToString(sig) + + keyVersion := "" + for _, opt := range opts { + opt.ApplyKeyVersion(&keyVersion) + } + + var vaultDataPrefix string + if keyVersion != "" { + // keyVersion >= 1 on verification but can be set to 0 on signing + kvUint, err := strconv.ParseUint(keyVersion, 10, 64) + if err != nil { + return fmt.Errorf("parsing requested key version: %w", err) + } else if kvUint == 0 { + return errors.New("key version must be >= 1") + } + + vaultDataPrefix = fmt.Sprintf("vault:v%d:", kvUint) + } else { + vaultDataPrefix = os.Getenv("VAULT_KEY_PREFIX") + if vaultDataPrefix == "" { + if h.keyVersion > 0 { + vaultDataPrefix = fmt.Sprintf("vault:v%d:", h.keyVersion) + } else { + vaultDataPrefix = vaultV1DataPrefix + } + } + } + + result, err := client.Write(fmt.Sprintf("/%s/verify/%s/%s", h.transitSecretEnginePath, h.keyPath, hashString(alg)), map[string]interface{}{ + "input": base64.StdEncoding.EncodeToString(digest), + "prehashed": alg != crypto.Hash(0), + "signature": fmt.Sprintf("%s%s", vaultDataPrefix, encodedSig), + }) + if err != nil { + return fmt.Errorf("verify: %w", err) + } + + valid, ok := result.Data["valid"] + if !ok { + return errors.New("corrupted response") + } + + isValid, ok := valid.(bool) + if !ok { + return fmt.Errorf("received non-bool value from 'valid' key") + } + + if !isValid { + return errors.New("failed vault verification") + } + + return nil +} + +// Vault likes to prefix base64 data with a version prefix +func vaultDecode(data interface{}, keyVersionUsed *string) ([]byte, error) { + encoded, ok := data.(string) + if !ok { + return nil, errors.New("received non-string data") + } + + if keyVersionUsed != nil { + *keyVersionUsed = prefixRegex.FindString(encoded) + } + return base64.StdEncoding.DecodeString(prefixRegex.ReplaceAllString(encoded, "")) +} + +func hashString(h crypto.Hash) string { + var hashStr string + switch h { + case crypto.SHA224: + hashStr = "/sha2-224" + case crypto.SHA256: + hashStr = "/sha2-256" + case crypto.SHA384: + hashStr = "/sha2-384" + case crypto.SHA512: + hashStr = "/sha2-512" + default: + hashStr = "" + } + return hashStr +} + +func (h hashivaultClient) createKey(typeStr string) (crypto.PublicKey, error) { + client := h.client.Logical() + + if _, err := client.Write(fmt.Sprintf("/%s/keys/%s", h.transitSecretEnginePath, h.keyPath), map[string]interface{}{ + "type": typeStr, + }); err != nil { + return nil, fmt.Errorf("failed to create transit key: %w", err) + } + return h.public() +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/doc.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/doc.go new file mode 100644 index 00000000000..27520ba03d5 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/doc.go @@ -0,0 +1,17 @@ +// +// Copyright 2022 The Sigstore Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package hashivault contains utilities related to Hashivault KMS. +package hashivault diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/signer.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/signer.go new file mode 100644 index 00000000000..1965f317d10 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/signer.go @@ -0,0 +1,233 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package hashivault + +import ( + "context" + "crypto" + "errors" + "fmt" + "io" + "strconv" + + "github.com/sigstore/sigstore/pkg/signature" + "github.com/sigstore/sigstore/pkg/signature/options" +) + +// Taken from https://www.vaultproject.io/api/secret/transit +// nolint:revive +const ( + AlgorithmECDSAP256 = "ecdsa-p256" + AlgorithmECDSAP384 = "ecdsa-p384" + AlgorithmECDSAP521 = "ecdsa-p521" + AlgorithmED25519 = "ed25519" + AlgorithmRSA2048 = "rsa-2048" + AlgorithmRSA3072 = "rsa-3072" + AlgorithmRSA4096 = "rsa-4096" +) + +var hvSupportedAlgorithms = []string{ + AlgorithmECDSAP256, + AlgorithmECDSAP384, + AlgorithmECDSAP521, + AlgorithmED25519, + AlgorithmRSA2048, + AlgorithmRSA3072, + AlgorithmRSA4096, +} + +var hvSupportedHashFuncs = []crypto.Hash{ + crypto.SHA224, + crypto.SHA256, + crypto.SHA384, + crypto.SHA512, + crypto.Hash(0), +} + +// SignerVerifier creates and verifies digital signatures over a message using Hashicorp Vault KMS service +type SignerVerifier struct { + hashFunc crypto.Hash + client *hashivaultClient +} + +// LoadSignerVerifier generates signatures using the specified key object in Vault and hash algorithm. +// +// It can also verify signatures (via a remote call to the Vault instance). hashFunc should be +// set to crypto.Hash(0) if the key referred to by referenceStr is an ED25519 signing key.
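+// +// A minimal usage sketch (hedged; the key name, Vault setup, and message below are illustrative only, not part of this package): +// +// sv, err := LoadSignerVerifier("hashivault://my-transit-key", crypto.SHA256) +// if err != nil { +// // handle error +// } +// sig, err := sv.SignMessage(strings.NewReader("payload"))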
+func LoadSignerVerifier(referenceStr string, hashFunc crypto.Hash, opts ...signature.RPCOption) (*SignerVerifier, error) { + h := &SignerVerifier{} + ctx := context.Background() + rpcAuth := options.RPCAuth{} + var keyVersion string + for _, opt := range opts { + opt.ApplyRPCAuthOpts(&rpcAuth) + opt.ApplyContext(&ctx) + opt.ApplyKeyVersion(&keyVersion) + } + + var keyVersionUint uint64 + var err error + if keyVersion != "" { + keyVersionUint, err = strconv.ParseUint(keyVersion, 10, 64) + if err != nil { + return nil, fmt.Errorf("parsing key version: %w", err) + } + } + + if rpcAuth.OIDC.Token != "" { + rpcAuth.Token, err = oidcLogin(ctx, rpcAuth.Address, rpcAuth.OIDC.Path, rpcAuth.OIDC.Role, rpcAuth.OIDC.Token) + if err != nil { + return nil, err + } + } + h.client, err = newHashivaultClient(rpcAuth.Address, rpcAuth.Token, rpcAuth.Path, referenceStr, keyVersionUint) + if err != nil { + return nil, err + } + + switch hashFunc { + case 0, crypto.SHA224, crypto.SHA256, crypto.SHA384, crypto.SHA512: + h.hashFunc = hashFunc + default: + return nil, errors.New("hash function not supported by Hashivault") + } + + return h, nil +} + +// SignMessage signs the provided message using HashiCorp Vault KMS. If the message is provided, +// this method will compute the digest according to the hash function specified +// when the SignerVerifier was created. +// +// SignMessage recognizes the following Options listed in order of preference: +// +// - WithDigest() +// +// All other options are ignored if specified. +func (h SignerVerifier) SignMessage(message io.Reader, opts ...signature.SignOption) ([]byte, error) { + var digest []byte + var signerOpts crypto.SignerOpts = h.hashFunc + + for _, opt := range opts { + opt.ApplyDigest(&digest) + opt.ApplyCryptoSignerOpts(&signerOpts) + } + + digest, hf, err := signature.ComputeDigestForSigning(message, signerOpts.HashFunc(), hvSupportedHashFuncs, opts...) + if err != nil { + return nil, err + } + + return h.client.sign(digest, hf, opts...) +} + +// PublicKey returns the public key that can be used to verify signatures created by +// this signer. All options provided in arguments to this method are ignored. +func (h SignerVerifier) PublicKey(_ ...signature.PublicKeyOption) (crypto.PublicKey, error) { + return h.client.public() +} + +// VerifySignature verifies the signature for the given message. Unless provided +// in an option, the digest of the message will be computed using the hash function specified +// when the SignerVerifier was created. +// +// This function returns nil if the verification succeeded, and an error otherwise. +// +// This function recognizes the following Options listed in order of preference: +// +// - WithDigest() +// +// - WithCryptoSignerOpts() +// +// All other options are ignored if specified. +func (h SignerVerifier) VerifySignature(sig, message io.Reader, opts ...signature.VerifyOption) error { + var digest []byte + var signerOpts crypto.SignerOpts = h.hashFunc + + for _, opt := range opts { + opt.ApplyDigest(&digest) + opt.ApplyCryptoSignerOpts(&signerOpts) + } + + digest, hf, err := signature.ComputeDigestForVerifying(message, signerOpts.HashFunc(), hvSupportedHashFuncs, opts...) + if err != nil { + return err + } + + sigBytes, err := io.ReadAll(sig) + if err != nil { + return fmt.Errorf("reading signature: %w", err) + } + + return h.client.verify(sigBytes, digest, hf, opts...) +} + +// CreateKey attempts to create a new key in Vault with the specified algorithm.
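+// For example (an illustrative sketch, assuming sv came from LoadSignerVerifier and the transit mount permits key creation): +// +// pub, err := sv.CreateKey(context.Background(), AlgorithmECDSAP256)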
+func (h SignerVerifier) CreateKey(_ context.Context, algorithm string) (crypto.PublicKey, error) { + return h.client.createKey(algorithm) +} + +type cryptoSignerWrapper struct { + ctx context.Context + hashFunc crypto.Hash + sv *SignerVerifier + errFunc func(error) +} + +func (c cryptoSignerWrapper) Public() crypto.PublicKey { + pk, err := c.sv.PublicKey(options.WithContext(c.ctx)) + if err != nil && c.errFunc != nil { + c.errFunc(err) + } + return pk +} + +func (c cryptoSignerWrapper) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) { + hashFunc := c.hashFunc + if opts != nil { + hashFunc = opts.HashFunc() + } + hvOptions := []signature.SignOption{ + options.WithContext(c.ctx), + options.WithDigest(digest), + options.WithCryptoSignerOpts(hashFunc), + } + + return c.sv.SignMessage(nil, hvOptions...) +} + +// CryptoSigner returns a crypto.Signer object that uses the underlying SignerVerifier, along with a crypto.SignerOpts object +// that allows the KMS to be used in APIs that only accept the standard golang objects +func (h *SignerVerifier) CryptoSigner(ctx context.Context, errFunc func(error)) (crypto.Signer, crypto.SignerOpts, error) { + csw := &cryptoSignerWrapper{ + ctx: ctx, + sv: h, + hashFunc: h.hashFunc, + errFunc: errFunc, + } + + return csw, h.hashFunc, nil +} + +// SupportedAlgorithms returns the list of algorithms supported by the Hashicorp Vault service +func (h *SignerVerifier) SupportedAlgorithms() []string { + return hvSupportedAlgorithms +} + +// DefaultAlgorithm returns the default algorithm for the Hashicorp Vault service +func (h *SignerVerifier) DefaultAlgorithm() string { + return AlgorithmECDSAP256 +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/kms.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/kms.go new file mode 100644 index 00000000000..7095eb10fe6 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/kms.go @@ -0,0 +1,78 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package kms implements the interface to access various kms services +package kms + +import ( + "context" + "crypto" + "fmt" + "strings" + + "github.com/sigstore/sigstore/pkg/signature" +) + +// ProviderNotFoundError indicates that no matching KMS provider was found +type ProviderNotFoundError struct { + ref string +} + +func (e *ProviderNotFoundError) Error() string { + return fmt.Sprintf("no kms provider found for key reference: %s", e.ref) +} + +// ProviderInit is a function that initializes a provider-specific SignerVerifier. +// +// It takes a provider-specific resource ID and hash function, and returns a +// SignerVerifier using that resource, or any error that was encountered.
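+// +// A provider package typically registers itself at init time. An illustrative sketch (the "examplekms://" scheme and the newExampleSignerVerifier constructor are hypothetical): +// +// func init() { +// kms.AddProvider("examplekms://", func(ctx context.Context, ref string, hf crypto.Hash, opts ...signature.RPCOption) (kms.SignerVerifier, error) { +// return newExampleSignerVerifier(ctx, ref, hf, opts...) +// }) +// }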
+type ProviderInit func(context.Context, string, crypto.Hash, ...signature.RPCOption) (SignerVerifier, error) + +// AddProvider adds the provider implementation into the local cache +func AddProvider(keyResourceID string, init ProviderInit) { + providersMap[keyResourceID] = init +} + +var providersMap = map[string]ProviderInit{} + +// Get returns a KMS SignerVerifier for the given resource string and hash function. +// If no matching provider is found, Get returns a ProviderNotFoundError. It +// also returns an error if initializing the SignerVerifier fails. +func Get(ctx context.Context, keyResourceID string, hashFunc crypto.Hash, opts ...signature.RPCOption) (SignerVerifier, error) { + for ref, pi := range providersMap { + if strings.HasPrefix(keyResourceID, ref) { + return pi(ctx, keyResourceID, hashFunc, opts...) + } + } + return nil, &ProviderNotFoundError{ref: keyResourceID} +} + +// SupportedProviders returns list of initialized providers +func SupportedProviders() []string { + keys := make([]string, 0, len(providersMap)) + for key := range providersMap { + keys = append(keys, key) + } + return keys +} + +// SignerVerifier creates and verifies digital signatures over a message using a KMS service +type SignerVerifier interface { + signature.SignerVerifier + CreateKey(ctx context.Context, algorithm string) (crypto.PublicKey, error) + CryptoSigner(ctx context.Context, errFunc func(error)) (crypto.Signer, crypto.SignerOpts, error) + SupportedAlgorithms() []string + DefaultAlgorithm() string +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go new file mode 100644 index 00000000000..d2e98d4295b --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b.go @@ -0,0 +1,291 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693 +// and the extendable output function (XOF) BLAKE2Xb. +// +// BLAKE2b is optimized for 64-bit platforms—including NEON-enabled ARMs—and +// produces digests of any size between 1 and 64 bytes. +// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf +// and for BLAKE2Xb see https://blake2.net/blake2x.pdf +// +// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512). +// If you need a secret-key MAC (message authentication code), use the New512 +// function with a non-nil key. +// +// BLAKE2X is a construction to compute hash values larger than 64 bytes. It +// can produce hash values between 0 and 4 GiB. +package blake2b + +import ( + "encoding/binary" + "errors" + "hash" +) + +const ( + // The blocksize of BLAKE2b in bytes. + BlockSize = 128 + // The hash size of BLAKE2b-512 in bytes. + Size = 64 + // The hash size of BLAKE2b-384 in bytes. + Size384 = 48 + // The hash size of BLAKE2b-256 in bytes. + Size256 = 32 +) + +var ( + useAVX2 bool + useAVX bool + useSSE4 bool +) + +var ( + errKeySize = errors.New("blake2b: invalid key size") + errHashSize = errors.New("blake2b: invalid hash size") +) + +var iv = [8]uint64{ + 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, + 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, +} + +// Sum512 returns the BLAKE2b-512 checksum of the data. 
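+// For example (a minimal sketch; the input bytes are arbitrary): +// +// digest := blake2b.Sum512([]byte("some data")) +// fmt.Printf("%x\n", digest)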
+func Sum512(data []byte) [Size]byte { + var sum [Size]byte + checkSum(&sum, Size, data) + return sum +} + +// Sum384 returns the BLAKE2b-384 checksum of the data. +func Sum384(data []byte) [Size384]byte { + var sum [Size]byte + var sum384 [Size384]byte + checkSum(&sum, Size384, data) + copy(sum384[:], sum[:Size384]) + return sum384 +} + +// Sum256 returns the BLAKE2b-256 checksum of the data. +func Sum256(data []byte) [Size256]byte { + var sum [Size]byte + var sum256 [Size256]byte + checkSum(&sum, Size256, data) + copy(sum256[:], sum[:Size256]) + return sum256 +} + +// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) } + +// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) } + +// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) } + +// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length. +// A non-nil key turns the hash into a MAC. The key must be between zero and 64 bytes long. +// The hash size can be a value between 1 and 64 but it is highly recommended to use +// values equal or greater than: +// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long). +// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long). +// When the key is nil, the returned hash.Hash implements BinaryMarshaler +// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash. +func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) } + +func newDigest(hashSize int, key []byte) (*digest, error) { + if hashSize < 1 || hashSize > Size { + return nil, errHashSize + } + if len(key) > Size { + return nil, errKeySize + } + d := &digest{ + size: hashSize, + keyLen: len(key), + } + copy(d.key[:], key) + d.Reset() + return d, nil +} + +func checkSum(sum *[Size]byte, hashSize int, data []byte) { + h := iv + h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24) + var c [2]uint64 + + if length := len(data); length > BlockSize { + n := length &^ (BlockSize - 1) + if length == n { + n -= BlockSize + } + hashBlocks(&h, &c, 0, data[:n]) + data = data[n:] + } + + var block [BlockSize]byte + offset := copy(block[:], data) + remaining := uint64(BlockSize - offset) + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h[:(hashSize+7)/8] { + binary.LittleEndian.PutUint64(sum[8*i:], v) + } +} + +type digest struct { + h [8]uint64 + c [2]uint64 + size int + block [BlockSize]byte + offset int + + key [BlockSize]byte + keyLen int +} + +const ( + magic = "b2b" + marshaledSize = len(magic) + 8*8 + 2*8 + 1 + BlockSize + 1 +) + +func (d *digest) MarshalBinary() ([]byte, error) { + if d.keyLen != 0 { + return nil, errors.New("crypto/blake2b: cannot marshal MACs") + } + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) 
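+ // The eight 64-bit chain values and then the 128-bit block counter are written big-endian (via appendUint64) so that UnmarshalBinary below can restore them.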
+ for i := 0; i < 8; i++ { + b = appendUint64(b, d.h[i]) + } + b = appendUint64(b, d.c[0]) + b = appendUint64(b, d.c[1]) + // Maximum value for size is 64 + b = append(b, byte(d.size)) + b = append(b, d.block[:]...) + b = append(b, byte(d.offset)) + return b, nil +} + +func (d *digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("crypto/blake2b: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("crypto/blake2b: invalid hash state size") + } + b = b[len(magic):] + for i := 0; i < 8; i++ { + b, d.h[i] = consumeUint64(b) + } + b, d.c[0] = consumeUint64(b) + b, d.c[1] = consumeUint64(b) + d.size = int(b[0]) + b = b[1:] + copy(d.block[:], b[:BlockSize]) + b = b[BlockSize:] + d.offset = int(b[0]) + return nil +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Size() int { return d.size } + +func (d *digest) Reset() { + d.h = iv + d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24) + d.offset, d.c[0], d.c[1] = 0, 0, 0 + if d.keyLen > 0 { + d.block = d.key + d.offset = BlockSize + } +} + +func (d *digest) Write(p []byte) (n int, err error) { + n = len(p) + + if d.offset > 0 { + remaining := BlockSize - d.offset + if n <= remaining { + d.offset += copy(d.block[d.offset:], p) + return + } + copy(d.block[d.offset:], p[:remaining]) + hashBlocks(&d.h, &d.c, 0, d.block[:]) + d.offset = 0 + p = p[remaining:] + } + + if length := len(p); length > BlockSize { + nn := length &^ (BlockSize - 1) + if length == nn { + nn -= BlockSize + } + hashBlocks(&d.h, &d.c, 0, p[:nn]) + p = p[nn:] + } + + if len(p) > 0 { + d.offset += copy(d.block[:], p) + } + + return +} + +func (d *digest) Sum(sum []byte) []byte { + var hash [Size]byte + d.finalize(&hash) + return append(sum, hash[:d.size]...) +} + +func (d *digest) finalize(hash *[Size]byte) { + var block [BlockSize]byte + copy(block[:], d.block[:d.offset]) + remaining := uint64(BlockSize - d.offset) + + c := d.c + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + h := d.h + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h { + binary.LittleEndian.PutUint64(hash[8*i:], v) + } +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.BigEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func appendUint32(b []byte, x uint32) []byte { + var a [4]byte + binary.BigEndian.PutUint32(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := binary.BigEndian.Uint64(b) + return b[8:], x +} + +func consumeUint32(b []byte) ([]byte, uint32) { + x := binary.BigEndian.Uint32(b) + return b[4:], x +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go new file mode 100644 index 00000000000..56bfaaa17da --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -0,0 +1,38 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.7 && amd64 && gc && !purego +// +build go1.7,amd64,gc,!purego + +package blake2b + +import "golang.org/x/sys/cpu" + +func init() { + useAVX2 = cpu.X86.HasAVX2 + useAVX = cpu.X86.HasAVX + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + switch { + case useAVX2: + hashBlocksAVX2(h, c, flag, blocks) + case useAVX: + hashBlocksAVX(h, c, flag, blocks) + case useSSE4: + hashBlocksSSE4(h, c, flag, blocks) + default: + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s new file mode 100644 index 00000000000..4b9daa18d9d --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -0,0 +1,745 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.7 && amd64 && gc && !purego +// +build go1.7,amd64,gc,!purego + +#include "textflag.h" + +DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 + +#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 +#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 +#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e +#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE 
$0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 +#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 + +#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ + VPADDQ m0, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m1, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y1_Y1; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y3_Y3; \ + VPADDQ m2, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m3, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y3_Y3; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y1_Y1 + +#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E +#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 +#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E +#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 +#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E + +#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n +#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n +#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n +#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n +#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n + +#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 +#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 +#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 +#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 +#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 + +#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 + +#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 +#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 + +// load msg: Y12 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y12, Y12 + +// load msg: Y13 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ + VMOVQ_SI_X13(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X13(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, 
Y13, Y13 + +// load msg: Y14 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ + VMOVQ_SI_X14(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X14(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y14, Y14 + +// load msg: Y15 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ + VMOVQ_SI_X15(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X15(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X11(6*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ + LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ + LOAD_MSG_AVX2_Y15(9, 11, 13, 15) + +#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ + LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ + LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ + VMOVQ_SI_X11(11*8); \ + VPSHUFD $0x4E, 0*8(SI), X14; \ + VPINSRQ_1_SI_X11(5*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(12, 2, 7, 3) + +#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ + VMOVQ_SI_X11(5*8); \ + VMOVDQU 11*8(SI), X12; \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + VMOVQ_SI_X13(8*8); \ + VMOVQ_SI_X11(2*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X11(13*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ + LOAD_MSG_AVX2_Y15(14, 6, 1, 4) + +#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ + LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ + LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ + LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ + VMOVQ_SI_X15(6*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X15(10*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ + LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X13(7*8); \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ + LOAD_MSG_AVX2_Y15(1, 12, 8, 13) + +#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ + LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ + LOAD_MSG_AVX2_Y15(13, 5, 14, 9) + +#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ + LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ + LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ + VMOVQ_SI_X14_0; \ + VPSHUFD $0x4E, 8*8(SI), X11; \ + VPINSRQ_1_SI_X14(6*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(7, 3, 2, 11) + +#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ + LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ + LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ + LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ + VMOVQ_SI_X15_0; \ + VMOVQ_SI_X11(6*8); \ + VPINSRQ_1_SI_X15(4*8); \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ + VMOVQ_SI_X12(6*8); \ + VMOVQ_SI_X11(11*8); \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ + VMOVQ_SI_X11(1*8); \ + VMOVDQU 12*8(SI), X14; \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + VMOVQ_SI_X15(2*8); \ + VMOVDQU 4*8(SI), X11; \ + VPINSRQ_1_SI_X15(7*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ + 
LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \ + VMOVQ_SI_X13(2*8); \ + VPSHUFD $0x4E, 5*8(SI), X11; \ + VPINSRQ_1_SI_X13(4*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ + VMOVQ_SI_X15(11*8); \ + VMOVQ_SI_X11(12*8); \ + VPINSRQ_1_SI_X15(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y15, Y15 + +// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, DX + ADDQ $31, DX + ANDQ $~31, DX + + MOVQ CX, 16(DX) + XORQ CX, CX + MOVQ CX, 24(DX) + + VMOVDQU ·AVX2_c40<>(SB), Y4 + VMOVDQU ·AVX2_c48<>(SB), Y5 + + VMOVDQU 0(AX), Y8 + VMOVDQU 32(AX), Y9 + VMOVDQU ·AVX2_iv0<>(SB), Y6 + VMOVDQU ·AVX2_iv1<>(SB), Y7 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + MOVQ R9, 8(DX) + +loop: + ADDQ $128, R8 + MOVQ R8, 0(DX) + CMPQ R8, $128 + JGE noinc + INCQ R9 + MOVQ R9, 8(DX) + +noinc: + VMOVDQA Y8, Y0 + VMOVDQA Y9, Y1 + VMOVDQA Y6, Y2 + VPXOR 0(DX), Y7, Y3 + + LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() + VMOVDQA Y12, 32(DX) + VMOVDQA Y13, 64(DX) + VMOVDQA Y14, 96(DX) + VMOVDQA Y15, 128(DX) + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() + VMOVDQA Y12, 160(DX) + VMOVDQA Y13, 192(DX) + VMOVDQA Y14, 224(DX) + VMOVDQA Y15, 256(DX) + + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + + ROUND_AVX2(32(DX), 64(DX), 96(DX), 128(DX), Y10, Y4, Y5) + ROUND_AVX2(160(DX), 192(DX), 224(DX), 256(DX), Y10, Y4, Y5) + + VPXOR Y0, Y8, Y8 + VPXOR Y1, Y9, Y9 + VPXOR Y2, Y8, Y8 + VPXOR Y3, Y9, Y9 + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + VMOVDQU Y8, 0(AX) + VMOVDQU Y9, 32(AX) + VZEROUPPER + + RET + +#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA +#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB +#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF +#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD +#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE + +#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF +#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X7_X6 
BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF + +#define SHUFFLE_AVX() \ + VMOVDQA X6, X13; \ + VMOVDQA X2, X14; \ + VMOVDQA X4, X6; \ + VPUNPCKLQDQ_X13_X13_X15; \ + VMOVDQA X5, X4; \ + VMOVDQA X6, X5; \ + VPUNPCKHQDQ_X15_X7_X6; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X13_X7; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VPUNPCKHQDQ_X15_X2_X2; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X3_X3; \ + +#define SHUFFLE_AVX_INV() \ + VMOVDQA X2, X13; \ + VMOVDQA X4, X14; \ + VPUNPCKLQDQ_X2_X2_X15; \ + VMOVDQA X5, X4; \ + VPUNPCKHQDQ_X15_X3_X2; \ + VMOVDQA X14, X5; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VMOVDQA X6, X14; \ + VPUNPCKHQDQ_X15_X13_X3; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X6_X6; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X7_X7; \ + +#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + VPADDQ m0, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m1, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFD $-79, v6, v6; \ + VPSHUFD $-79, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPSHUFB c40, v2, v2; \ + VPSHUFB c40, v3, v3; \ + VPADDQ m2, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m3, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFB c48, v6, v6; \ + VPSHUFB c48, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPADDQ v2, v2, t0; \ + VPSRLQ $63, v2, v2; \ + VPXOR t0, v2, v2; \ + VPADDQ v3, v3, t0; \ + VPSRLQ $63, v3, v3; \ + VPXOR t0, v3, v3 + +// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7) +// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0 +#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X13(i2*8); \ + VMOVQ_SI_X14(i4*8); \ + VMOVQ_SI_X15(i6*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X13(i3*8); \ + VPINSRQ_1_SI_X14(i5*8); \ + VPINSRQ_1_SI_X15(i7*8) + +// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7) +#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(1*8); \ + VMOVQ_SI_X15(5*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X13(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(7*8) + +// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3) +#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \ + VPSHUFD $0x4E, 0*8(SI), X12; \ + VMOVQ_SI_X13(11*8); \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(7*8); \ + VPINSRQ_1_SI_X13(5*8); \ + VPINSRQ_1_SI_X14(2*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13) +#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \ + VMOVDQU 11*8(SI), X12; \ + VMOVQ_SI_X13(5*8); \ + VMOVQ_SI_X14(8*8); \ + VMOVQ_SI_X15(2*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14_0; \ + VPINSRQ_1_SI_X15(13*8) + +// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8) +#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(6*8); \ + VMOVQ_SI_X15_0; \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15) +#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \ + VMOVQ_SI_X12(9*8); \ + VMOVQ_SI_X13(2*8); \ + 
VMOVQ_SI_X14_0; \ + VMOVQ_SI_X15(4*8); \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VPINSRQ_1_SI_X15(15*8) + +// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3) +#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(11*8); \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X13(8*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11) +#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \ + MOVQ 0*8(SI), X12; \ + VPSHUFD $0x4E, 8*8(SI), X13; \ + MOVQ 7*8(SI), X14; \ + MOVQ 2*8(SI), X15; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(11*8) + +// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8) +#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \ + MOVQ 6*8(SI), X12; \ + MOVQ 11*8(SI), X13; \ + MOVQ 15*8(SI), X14; \ + MOVQ 3*8(SI), X15; \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X14(9*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10) +#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \ + MOVQ 5*8(SI), X12; \ + MOVQ 8*8(SI), X13; \ + MOVQ 0*8(SI), X14; \ + MOVQ 6*8(SI), X15; \ + VPINSRQ_1_SI_X12(15*8); \ + VPINSRQ_1_SI_X13(2*8); \ + VPINSRQ_1_SI_X14(4*8); \ + VPINSRQ_1_SI_X15(10*8) + +// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5) +#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \ + VMOVDQU 12*8(SI), X12; \ + MOVQ 1*8(SI), X13; \ + MOVQ 2*8(SI), X14; \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VMOVDQU 4*8(SI), X15 + +// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0) +#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \ + MOVQ 15*8(SI), X12; \ + MOVQ 3*8(SI), X13; \ + MOVQ 11*8(SI), X14; \ + MOVQ 12*8(SI), X15; \ + VPINSRQ_1_SI_X12(9*8); \ + VPINSRQ_1_SI_X13(13*8); \ + VPINSRQ_1_SI_X14(14*8); \ + VPINSRQ_1_SI_X15_0 + +// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, R10 + ADDQ $15, R10 + ANDQ $~15, R10 + + VMOVDQU ·AVX_c40<>(SB), X0 + VMOVDQU ·AVX_c48<>(SB), X1 + VMOVDQA X0, X8 + VMOVDQA X1, X9 + + VMOVDQU ·AVX_iv3<>(SB), X0 + VMOVDQA X0, 0(R10) + XORQ CX, 0(R10) // 0(R10) = ·AVX_iv3 ^ (CX || 0) + + VMOVDQU 0(AX), X10 + VMOVDQU 16(AX), X11 + VMOVDQU 32(AX), X2 + VMOVDQU 48(AX), X3 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + VMOVQ_R8_X15 + VPINSRQ_1_R9_X15 + + VMOVDQA X10, X0 + VMOVDQA X11, X1 + VMOVDQU ·AVX_iv0<>(SB), X4 + VMOVDQU ·AVX_iv1<>(SB), X5 + VMOVDQU ·AVX_iv2<>(SB), X6 + + VPXOR X15, X6, X6 + VMOVDQA 0(R10), X7 + + LOAD_MSG_AVX_0_2_4_6_1_3_5_7() + VMOVDQA X12, 16(R10) + VMOVDQA X13, 32(R10) + VMOVDQA X14, 48(R10) + VMOVDQA X15, 64(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) + VMOVDQA X12, 80(R10) + VMOVDQA X13, 96(R10) + VMOVDQA X14, 112(R10) + VMOVDQA X15, 128(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) + VMOVDQA X12, 144(R10) + VMOVDQA X13, 160(R10) + VMOVDQA X14, 176(R10) + VMOVDQA X15, 192(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, 
X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_1_0_11_5_12_2_7_3() + VMOVDQA X12, 208(R10) + VMOVDQA X13, 224(R10) + VMOVDQA X14, 240(R10) + VMOVDQA X15, 256(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_11_12_5_15_8_0_2_13() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_2_5_4_15_6_10_0_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_9_5_2_10_0_7_4_15() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_2_6_0_8_12_10_11_3() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_0_6_9_8_7_3_2_11() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_5_15_8_2_0_4_6_10() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_6_14_11_0_15_9_3_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_12_13_1_10_2_7_4_5() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_15_9_3_13_11_14_12_0() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X15, X8, X9) + SHUFFLE_AVX_INV() + + VMOVDQU 32(AX), X14 + VMOVDQU 48(AX), X15 + VPXOR X0, X10, X10 + VPXOR X1, X11, X11 + VPXOR X2, X14, X14 + VPXOR X3, X15, X15 + VPXOR X4, X10, X10 + VPXOR X5, X11, X11 + VPXOR X6, X14, X2 + VPXOR X7, X15, X3 + VMOVDQU X2, 32(AX) + VMOVDQU X3, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + VMOVDQU X10, 0(AX) + VMOVDQU X11, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + VZEROUPPER + + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go new file mode 100644 index 
00000000000..5fa1b32841d --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go @@ -0,0 +1,25 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.7 && amd64 && gc && !purego +// +build !go1.7,amd64,gc,!purego + +package blake2b + +import "golang.org/x/sys/cpu" + +func init() { + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + if useSSE4 { + hashBlocksSSE4(h, c, flag, blocks) + } else { + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s new file mode 100644 index 00000000000..ae75eb9afcd --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -0,0 +1,279 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && gc && !purego +// +build amd64,gc,!purego + +#include "textflag.h" + +DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + PADDQ m0, v0; \ + PADDQ m1, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFD $0xB1, v6, v6; \ + PSHUFD $0xB1, v7, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + PSHUFB c40, v2; \ + PSHUFB c40, v3; \ + PADDQ m2, v0; \ + PADDQ m3, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFB c48, v6; \ + PSHUFB c48, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + MOVOU v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVOU v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \ + MOVQ 
i0*8(src), m0; \ + PINSRQ $1, i1*8(src), m0; \ + MOVQ i2*8(src), m1; \ + PINSRQ $1, i3*8(src), m1; \ + MOVQ i4*8(src), m2; \ + PINSRQ $1, i5*8(src), m2; \ + MOVQ i6*8(src), m3; \ + PINSRQ $1, i7*8(src), m3 + +// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, R10 + ADDQ $15, R10 + ANDQ $~15, R10 + + MOVOU ·iv3<>(SB), X0 + MOVO X0, 0(R10) + XORQ CX, 0(R10) // 0(R10) = ·iv3 ^ (CX || 0) + + MOVOU ·c40<>(SB), X13 + MOVOU ·c48<>(SB), X14 + + MOVOU 0(AX), X12 + MOVOU 16(AX), X15 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + MOVQ R8, X8 + PINSRQ $1, R9, X8 + + MOVO X12, X0 + MOVO X15, X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU ·iv0<>(SB), X4 + MOVOU ·iv1<>(SB), X5 + MOVOU ·iv2<>(SB), X6 + + PXOR X8, X6 + MOVO 0(R10), X7 + + LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) + MOVO X8, 16(R10) + MOVO X9, 32(R10) + MOVO X10, 48(R10) + MOVO X11, 64(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) + MOVO X8, 80(R10) + MOVO X9, 96(R10) + MOVO X10, 112(R10) + MOVO X11, 128(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) + MOVO X8, 144(R10) + MOVO X9, 160(R10) + MOVO X10, 176(R10) + MOVO X11, 192(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) + MOVO X8, 208(R10) + MOVO X9, 224(R10) + MOVO X10, 240(R10) + MOVO X11, 256(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + 
LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + PXOR X0, X12 + PXOR X1, X15 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X15 + PXOR X6, X10 + PXOR X7, X11 + MOVOU X10, 32(AX) + MOVOU X11, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVOU X12, 0(AX) + MOVOU X15, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go new file mode 100644 index 00000000000..3168a8aa3c8 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go @@ -0,0 +1,182 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "encoding/binary" + "math/bits" +) + +// the precomputed values for BLAKE2b +// there are 12 16-byte arrays - one for each round +// the entries are calculated from the sigma constants. 
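+// Each row is one sigma permutation: it lists, in scheduling order, the message-word indices consumed by that round's G mixing steps. BLAKE2b runs 12 rounds, so rows 10 and 11 repeat rows 0 and 1, matching RFC 7693.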
+var precomputed = [12][16]byte{ + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, + {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, + {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, + {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, + {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, + {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, + {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, + {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, + {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second +} + +func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + var m [16]uint64 + c0, c1 := c[0], c[1] + + for i := 0; i < len(blocks); { + c0 += BlockSize + if c0 < BlockSize { + c1++ + } + + v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] + v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] + v12 ^= c0 + v13 ^= c1 + v14 ^= flag + + for j := range m { + m[j] = binary.LittleEndian.Uint64(blocks[i:]) + i += 8 + } + + for j := range precomputed { + s := &(precomputed[j]) + + v0 += m[s[0]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -32) + v8 += v12 + v4 ^= v8 + v4 = bits.RotateLeft64(v4, -24) + v1 += m[s[1]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -32) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -24) + v2 += m[s[2]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -32) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -24) + v3 += m[s[3]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -32) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -24) + + v0 += m[s[4]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -16) + v8 += v12 + v4 ^= v8 + v4 = bits.RotateLeft64(v4, -63) + v1 += m[s[5]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -16) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -63) + v2 += m[s[6]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -16) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -63) + v3 += m[s[7]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -16) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -63) + + v0 += m[s[8]] + v0 += v5 + v15 ^= v0 + v15 = bits.RotateLeft64(v15, -32) + v10 += v15 + v5 ^= v10 + v5 = bits.RotateLeft64(v5, -24) + v1 += m[s[9]] + v1 += v6 + v12 ^= v1 + v12 = bits.RotateLeft64(v12, -32) + v11 += v12 + v6 ^= v11 + v6 = bits.RotateLeft64(v6, -24) + v2 += m[s[10]] + v2 += v7 + v13 ^= v2 + v13 = bits.RotateLeft64(v13, -32) + v8 += v13 + v7 ^= v8 + v7 = bits.RotateLeft64(v7, -24) + v3 += m[s[11]] + v3 += v4 + v14 ^= v3 + v14 = bits.RotateLeft64(v14, -32) + v9 += v14 + v4 ^= v9 + v4 = bits.RotateLeft64(v4, -24) + + v0 += m[s[12]] + v0 += v5 + v15 ^= v0 + v15 = bits.RotateLeft64(v15, -16) + v10 += v15 + v5 ^= v10 + v5 = bits.RotateLeft64(v5, -63) + v1 += m[s[13]] + v1 += v6 + v12 ^= v1 + v12 = bits.RotateLeft64(v12, -16) + v11 += v12 + v6 ^= v11 + v6 = bits.RotateLeft64(v6, -63) + v2 += m[s[14]] + v2 += v7 + v13 ^= v2 + v13 = bits.RotateLeft64(v13, -16) + v8 += v13 + v7 ^= v8 + v7 = bits.RotateLeft64(v7, -63) + v3 += m[s[15]] + v3 += v4 + v14 ^= v3 + v14 = bits.RotateLeft64(v14, -16) + v9 += v14 + v4 ^= v9 + v4 = bits.RotateLeft64(v4, -63) + + } + + h[0] ^= v0 ^ v8 + h[1] ^= v1 ^ v9 + h[2] ^= v2 ^ 
v10
+	h[3] ^= v3 ^ v11
+	h[4] ^= v4 ^ v12
+	h[5] ^= v5 ^ v13
+	h[6] ^= v6 ^ v14
+	h[7] ^= v7 ^ v15
+	}
+	c[0], c[1] = c0, c1
+}
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go
new file mode 100644
index 00000000000..b0137cdf025
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go
@@ -0,0 +1,12 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64 || purego || !gc
+// +build !amd64 purego !gc
+
+package blake2b
+
+func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
+	hashBlocksGeneric(h, c, flag, blocks)
+}
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2x.go b/vendor/golang.org/x/crypto/blake2b/blake2x.go
new file mode 100644
index 00000000000..52c414db0e6
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blake2b/blake2x.go
@@ -0,0 +1,177 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package blake2b
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+// XOF defines the interface to hash functions that
+// support arbitrary-length output.
+type XOF interface {
+	// Write absorbs more data into the hash's state. It panics if called
+	// after Read.
+	io.Writer
+
+	// Read reads more output from the hash. It returns io.EOF if the limit
+	// has been reached.
+	io.Reader
+
+	// Clone returns a copy of the XOF in its current state.
+	Clone() XOF
+
+	// Reset resets the XOF to its initial state.
+	Reset()
+}
+
+// OutputLengthUnknown can be used as the size argument to NewXOF to indicate
+// the length of the output is not known in advance.
+const OutputLengthUnknown = 0
+
+// magicUnknownOutputLength is a magic value for the output size that indicates
+// an unknown number of output bytes.
+const magicUnknownOutputLength = (1 << 32) - 1
+
+// maxOutputLength is the absolute maximum number of bytes to produce when the
+// number of output bytes is unknown.
+const maxOutputLength = (1 << 32) * 64
+
+// NewXOF creates a new variable-output-length hash. The hash either produces a
+// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes
+// (size == OutputLengthUnknown). In the latter case, an absolute limit of
+// 256GiB applies.
+//
+// A non-nil key turns the hash into a MAC. The key must be between
+// zero and 32 bytes long.
+func NewXOF(size uint32, key []byte) (XOF, error) {
+	if len(key) > Size {
+		return nil, errKeySize
+	}
+	if size == magicUnknownOutputLength {
+		// 2^32-1 indicates an unknown number of bytes and thus isn't a
+		// valid length.
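+		// Callers request unbounded output by passing OutputLengthUnknown instead.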
+ return nil, errors.New("blake2b: XOF length too large") + } + if size == OutputLengthUnknown { + size = magicUnknownOutputLength + } + x := &xof{ + d: digest{ + size: Size, + keyLen: len(key), + }, + length: size, + } + copy(x.d.key[:], key) + x.Reset() + return x, nil +} + +type xof struct { + d digest + length uint32 + remaining uint64 + cfg, root, block [Size]byte + offset int + nodeOffset uint32 + readMode bool +} + +func (x *xof) Write(p []byte) (n int, err error) { + if x.readMode { + panic("blake2b: write to XOF after read") + } + return x.d.Write(p) +} + +func (x *xof) Clone() XOF { + clone := *x + return &clone +} + +func (x *xof) Reset() { + x.cfg[0] = byte(Size) + binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length + binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length + x.cfg[17] = byte(Size) // inner hash size + + x.d.Reset() + x.d.h[1] ^= uint64(x.length) << 32 + + x.remaining = uint64(x.length) + if x.remaining == magicUnknownOutputLength { + x.remaining = maxOutputLength + } + x.offset, x.nodeOffset = 0, 0 + x.readMode = false +} + +func (x *xof) Read(p []byte) (n int, err error) { + if !x.readMode { + x.d.finalize(&x.root) + x.readMode = true + } + + if x.remaining == 0 { + return 0, io.EOF + } + + n = len(p) + if uint64(n) > x.remaining { + n = int(x.remaining) + p = p[:n] + } + + if x.offset > 0 { + blockRemaining := Size - x.offset + if n < blockRemaining { + x.offset += copy(p, x.block[x.offset:]) + x.remaining -= uint64(n) + return + } + copy(p, x.block[x.offset:]) + p = p[blockRemaining:] + x.offset = 0 + x.remaining -= uint64(blockRemaining) + } + + for len(p) >= Size { + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + copy(p, x.block[:]) + p = p[Size:] + x.remaining -= uint64(Size) + } + + if todo := len(p); todo > 0 { + if x.remaining < uint64(Size) { + x.cfg[0] = byte(x.remaining) + } + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + x.offset = copy(p, x.block[:todo]) + x.remaining -= uint64(todo) + } + return +} + +func (d *digest) initConfig(cfg *[Size]byte) { + d.offset, d.c[0], d.c[1] = 0, 0, 0 + for i := range d.h { + d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:]) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go new file mode 100644 index 00000000000..9d8633963cb --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/register.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.9 +// +build go1.9 + +package blake2b + +import ( + "crypto" + "hash" +) + +func init() { + newHash256 := func() hash.Hash { + h, _ := New256(nil) + return h + } + newHash384 := func() hash.Hash { + h, _ := New384(nil) + return h + } + + newHash512 := func() hash.Hash { + h, _ := New512(nil) + return h + } + + crypto.RegisterHash(crypto.BLAKE2b_256, newHash256) + crypto.RegisterHash(crypto.BLAKE2b_384, newHash384) + crypto.RegisterHash(crypto.BLAKE2b_512, newHash512) +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go new file mode 100644 index 00000000000..401414dde2f --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -0,0 +1,816 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + encoding_asn1 "encoding/asn1" + "fmt" + "math/big" + "reflect" + "time" + + "golang.org/x/crypto/cryptobyte/asn1" +) + +// This file contains ASN.1-related methods for String and Builder. + +// Builder + +// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Int64(v int64) { + b.addASN1Signed(asn1.INTEGER, v) +} + +// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the +// given tag. +func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) { + b.addASN1Signed(tag, v) +} + +// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION. +func (b *Builder) AddASN1Enum(v int64) { + b.addASN1Signed(asn1.ENUM, v) +} + +func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) { + b.AddASN1(tag, func(c *Builder) { + length := 1 + for i := v; i >= 0x80 || i < -0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Uint64(v uint64) { + b.AddASN1(asn1.INTEGER, func(c *Builder) { + length := 1 + for i := v; i >= 0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1BigInt(n *big.Int) { + if b.err != nil { + return + } + + b.AddASN1(asn1.INTEGER, func(c *Builder) { + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement form. So we + // invert and subtract 1. If the most-significant-bit isn't set then + // we'll need to pad the beginning with 0xff in order to keep the number + // negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + c.add(0xff) + } + c.add(bytes...) + } else if n.Sign() == 0 { + c.add(0) + } else { + bytes := n.Bytes() + if bytes[0]&0x80 != 0 { + c.add(0) + } + c.add(bytes...) + } + }) +} + +// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING. +func (b *Builder) AddASN1OctetString(bytes []byte) { + b.AddASN1(asn1.OCTET_STRING, func(c *Builder) { + c.AddBytes(bytes) + }) +} + +const generalizedTimeFormatStr = "20060102150405Z0700" + +// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME. 
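+// The value is serialized with seconds precision and a numeric time zone,
+// per generalizedTimeFormatStr above.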
+func (b *Builder) AddASN1GeneralizedTime(t time.Time) { + if t.Year() < 0 || t.Year() > 9999 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t) + return + } + b.AddASN1(asn1.GeneralizedTime, func(c *Builder) { + c.AddBytes([]byte(t.Format(generalizedTimeFormatStr))) + }) +} + +// AddASN1UTCTime appends a DER-encoded ASN.1 UTCTime. +func (b *Builder) AddASN1UTCTime(t time.Time) { + b.AddASN1(asn1.UTCTime, func(c *Builder) { + // As utilized by the X.509 profile, UTCTime can only + // represent the years 1950 through 2049. + if t.Year() < 1950 || t.Year() >= 2050 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a UTCTime", t) + return + } + c.AddBytes([]byte(t.Format(defaultUTCTimeFormatStr))) + }) +} + +// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not +// support BIT STRINGs that are not a whole number of bytes. +func (b *Builder) AddASN1BitString(data []byte) { + b.AddASN1(asn1.BIT_STRING, func(b *Builder) { + b.AddUint8(0) + b.AddBytes(data) + }) +} + +func (b *Builder) addBase128Int(n int64) { + var length int + if n == 0 { + length = 1 + } else { + for i := n; i > 0; i >>= 7 { + length++ + } + } + + for i := length - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + + b.add(o) + } +} + +func isValidOID(oid encoding_asn1.ObjectIdentifier) bool { + if len(oid) < 2 { + return false + } + + if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) { + return false + } + + for _, v := range oid { + if v < 0 { + return false + } + } + + return true +} + +func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) { + b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) { + if !isValidOID(oid) { + b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid) + return + } + + b.addBase128Int(int64(oid[0])*40 + int64(oid[1])) + for _, v := range oid[2:] { + b.addBase128Int(int64(v)) + } + }) +} + +func (b *Builder) AddASN1Boolean(v bool) { + b.AddASN1(asn1.BOOLEAN, func(b *Builder) { + if v { + b.AddUint8(0xff) + } else { + b.AddUint8(0) + } + }) +} + +func (b *Builder) AddASN1NULL() { + b.add(uint8(asn1.NULL), 0) +} + +// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if +// successful or records an error if one occurred. +func (b *Builder) MarshalASN1(v interface{}) { + // NOTE(martinkr): This is somewhat of a hack to allow propagation of + // encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a + // value embedded into a struct, its tag information is lost. + if b.err != nil { + return + } + bytes, err := encoding_asn1.Marshal(v) + if err != nil { + b.err = err + return + } + b.AddBytes(bytes) +} + +// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag. +// Tags greater than 30 are not supported and result in an error (i.e. +// low-tag-number form only). The child builder passed to the +// BuilderContinuation can be used to build the content of the ASN.1 object. +func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) { + if b.err != nil { + return + } + // Identifiers with the low five bits set indicate high-tag-number format + // (two or more octets), which we don't support. + if tag&0x1f == 0x1f { + b.err = fmt.Errorf("cryptobyte: high-tag number identifier octects not supported: 0x%x", tag) + return + } + b.AddUint8(uint8(tag)) + b.addLengthPrefixed(1, true, f) +} + +// String + +// ReadASN1Boolean decodes an ASN.1 BOOLEAN and converts it to a boolean +// representation into out and advances. 
It reports whether the read +// was successful. +func (s *String) ReadASN1Boolean(out *bool) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 { + return false + } + + switch bytes[0] { + case 0: + *out = false + case 0xff: + *out = true + default: + return false + } + + return true +} + +// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does +// not point to an integer, to a big.Int, or to a []byte it panics. Only +// positive and zero values can be decoded into []byte, and they are returned as +// big-endian binary values that share memory with s. Positive values will have +// no leading zeroes, and zero will be returned as a single zero byte. +// ReadASN1Integer reports whether the read was successful. +func (s *String) ReadASN1Integer(out interface{}) bool { + switch out := out.(type) { + case *int, *int8, *int16, *int32, *int64: + var i int64 + if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) { + return false + } + reflect.ValueOf(out).Elem().SetInt(i) + return true + case *uint, *uint8, *uint16, *uint32, *uint64: + var u uint64 + if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) { + return false + } + reflect.ValueOf(out).Elem().SetUint(u) + return true + case *big.Int: + return s.readASN1BigInt(out) + case *[]byte: + return s.readASN1Bytes(out) + default: + panic("out does not point to an integer type") + } +} + +func checkASN1Integer(bytes []byte) bool { + if len(bytes) == 0 { + // An INTEGER is encoded with at least one octet. + return false + } + if len(bytes) == 1 { + return true + } + if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 { + // Value is not minimally encoded. + return false + } + return true +} + +var bigOne = big.NewInt(1) + +func (s *String) readASN1BigInt(out *big.Int) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + // Negative number. + neg := make([]byte, len(bytes)) + for i, b := range bytes { + neg[i] = ^b + } + out.SetBytes(neg) + out.Add(out, bigOne) + out.Neg(out) + } else { + out.SetBytes(bytes) + } + return true +} + +func (s *String) readASN1Bytes(out *[]byte) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + return false + } + for len(bytes) > 1 && bytes[0] == 0 { + bytes = bytes[1:] + } + *out = bytes + return true +} + +func (s *String) readASN1Int64(out *int64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) { + return false + } + return true +} + +func asn1Signed(out *int64, n []byte) bool { + length := len(n) + if length > 8 { + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= int64(n[i]) + } + // Shift up and down in order to sign extend the result. + *out <<= 64 - uint8(length)*8 + *out >>= 64 - uint8(length)*8 + return true +} + +func (s *String) readASN1Uint64(out *uint64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) { + return false + } + return true +} + +func asn1Unsigned(out *uint64, n []byte) bool { + length := len(n) + if length > 9 || length == 9 && n[0] != 0 { + // Too large for uint64. + return false + } + if n[0]&0x80 != 0 { + // Negative number. 
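+		// A set top bit means the INTEGER is negative, which a uint64 cannot represent.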
+ return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= uint64(n[i]) + } + return true +} + +// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out +// and advances. It reports whether the read was successful and resulted in a +// value that can be represented in an int64. +func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool { + var bytes String + return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes) +} + +// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports +// whether the read was successful. +func (s *String) ReadASN1Enum(out *int) bool { + var bytes String + var i int64 + if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) { + return false + } + if int64(int(i)) != i { + return false + } + *out = int(i) + return true +} + +func (s *String) readBase128Int(out *int) bool { + ret := 0 + for i := 0; len(*s) > 0; i++ { + if i == 5 { + return false + } + // Avoid overflowing int on a 32-bit platform. + // We don't want different behavior based on the architecture. + if ret >= 1<<(31-7) { + return false + } + ret <<= 7 + b := s.read(1)[0] + ret |= int(b & 0x7f) + if b&0x80 == 0 { + *out = ret + return true + } + } + return false // truncated +} + +// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 { + return false + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. + components := make([]int, len(bytes)+1) + + // The first varint is 40*value1 + value2: + // According to this packing, value1 can take the values 0, 1 and 2 only. + // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, + // then there are no restrictions on value2. + var v int + if !bytes.readBase128Int(&v) { + return false + } + if v < 80 { + components[0] = v / 40 + components[1] = v % 40 + } else { + components[0] = 2 + components[1] = v - 80 + } + + i := 2 + for ; len(bytes) > 0; i++ { + if !bytes.readBase128Int(&v) { + return false + } + components[i] = v + } + *out = components[:i] + return true +} + +// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.GeneralizedTime) { + return false + } + t := string(bytes) + res, err := time.Parse(generalizedTimeFormatStr, t) + if err != nil { + return false + } + if serialized := res.Format(generalizedTimeFormatStr); serialized != t { + return false + } + *out = res + return true +} + +const defaultUTCTimeFormatStr = "060102150405Z0700" + +// ReadASN1UTCTime decodes an ASN.1 UTCTime into out and advances. +// It reports whether the read was successful. +func (s *String) ReadASN1UTCTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.UTCTime) { + return false + } + t := string(bytes) + + formatStr := defaultUTCTimeFormatStr + var err error + res, err := time.Parse(formatStr, t) + if err != nil { + // Fallback to minute precision if we can't parse second + // precision. If we are following X.509 or X.690 we shouldn't + // support this, but we do. 
+		formatStr = "0601021504Z0700"
+		res, err = time.Parse(formatStr, t)
+	}
+	if err != nil {
+		return false
+	}
+
+	if serialized := res.Format(formatStr); serialized != t {
+		return false
+	}
+
+	if res.Year() >= 2050 {
+		// UTCTime interprets the low order digits 50-99 as 1950-99.
+		// This only applies to its use in the X.509 profile.
+		// See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
+		res = res.AddDate(-100, 0, 0)
+	}
+	*out = res
+	return true
+}
+
+// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances.
+// It reports whether the read was successful.
+func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool {
+	var bytes String
+	if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 ||
+		len(bytes)*8/8 != len(bytes) {
+		return false
+	}
+
+	paddingBits := bytes[0]
+	bytes = bytes[1:]
+	if paddingBits > 7 ||
+		len(bytes) == 0 && paddingBits != 0 ||
+		len(bytes) > 0 && bytes[len(bytes)-1]&(1<<paddingBits-1) != 0 {
+		return false
+	}
+	out.BitLength = len(bytes)*8 - int(paddingBits)
+	out.Bytes = bytes
+
+	return true
+}
+
+func (s *String) readASN1(out *String, outTag *asn1.Tag, skipHeader bool) bool {
+	if len(*s) < 2 {
+		return false
+	}
+	tag, lenByte := (*s)[0], (*s)[1]
+
+	if tag&0x1f == 0x1f {
+		// ITU-T X.690 section 8.1.2
+		//
+		// An identifier octet with a tag part of 0x1f indicates a high-tag-number
+		// form identifier with two or more octets. We only support tags less than
+		// 31 (i.e. low-tag-number form, single octet identifier).
+		return false
+	}
+
+	if outTag != nil {
+		*outTag = asn1.Tag(tag)
+	}
+
+	// ITU-T X.690 section 8.1.3
+	//
+	// Bit 8 of the first length byte indicates whether the length is short- or
+	// long-form.
+	var length, headerLen uint32 // length includes headerLen
+	if lenByte&0x80 == 0 {
+		// Short-form length (section 8.1.3.4), encoded in bits 1-7.
+		length = uint32(lenByte) + 2
+		headerLen = 2
+	} else {
+		// Long-form length (section 8.1.3.5). Bits 1-7 encode the number of octets
+		// used to encode the length.
+		lenLen := lenByte & 0x7f
+		var len32 uint32
+
+		if lenLen == 0 || lenLen > 4 || len(*s) < int(2+lenLen) {
+			return false
+		}
+
+		lenBytes := String((*s)[2 : 2+lenLen])
+		if !lenBytes.readUnsigned(&len32, int(lenLen)) {
+			return false
+		}
+
+		// ITU-T X.690 section 10.1 (DER length forms) requires encoding the length
+		// with the minimum number of octets.
+		if len32 < 128 {
+			// Length should have used short-form encoding.
+			return false
+		}
+		if len32>>((lenLen-1)*8) == 0 {
+			// Leading octet is 0. Length should have been at least one byte shorter.
+			return false
+		}
+
+		headerLen = 2 + uint32(lenLen)
+		if headerLen+len32 < len32 {
+			// Overflow.
+			return false
+		}
+		length = headerLen + len32
+	}
+
+	if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) {
+		return false
+	}
+	if skipHeader && !out.Skip(int(headerLen)) {
+		panic("cryptobyte: internal error")
+	}
+
+	return true
+}
diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
new file mode 100644
index 00000000000..cda8e3edfd5
--- /dev/null
+++ b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
@@ -0,0 +1,46 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package asn1 contains supporting types for parsing and building ASN.1
+// messages with the cryptobyte package.
+package asn1 // import "golang.org/x/crypto/cryptobyte/asn1"
+
+// Tag represents an ASN.1 identifier octet, consisting of a tag number
+// (indicating a type) and class (such as context-specific or constructed).
+//
+// Methods in the cryptobyte package only support the low-tag-number form, i.e.
+// a single identifier octet with bits 7-8 encoding the class and bits 1-6
+// encoding the tag number.
+type Tag uint8
+
+const (
+	classConstructed     = 0x20
+	classContextSpecific = 0x80
+)
+
+// Constructed returns t with the constructed class bit set.
+func (t Tag) Constructed() Tag { return t | classConstructed }
+
+// ContextSpecific returns t with the context-specific class bit set.
+func (t Tag) ContextSpecific() Tag { return t | classContextSpecific }
+
+// The following is a list of standard tag and class combinations.
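+// SEQUENCE and SET carry the constructed class bit because DER always
+// encodes them in constructed form.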
+const ( + BOOLEAN = Tag(1) + INTEGER = Tag(2) + BIT_STRING = Tag(3) + OCTET_STRING = Tag(4) + NULL = Tag(5) + OBJECT_IDENTIFIER = Tag(6) + ENUM = Tag(10) + UTF8String = Tag(12) + SEQUENCE = Tag(16 | classConstructed) + SET = Tag(17 | classConstructed) + PrintableString = Tag(19) + T61String = Tag(20) + IA5String = Tag(22) + UTCTime = Tag(23) + GeneralizedTime = Tag(24) + GeneralString = Tag(27) +) diff --git a/vendor/golang.org/x/crypto/cryptobyte/builder.go b/vendor/golang.org/x/crypto/cryptobyte/builder.go new file mode 100644 index 00000000000..2a90c592d7c --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/builder.go @@ -0,0 +1,342 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + "errors" + "fmt" +) + +// A Builder builds byte strings from fixed-length and length-prefixed values. +// Builders either allocate space as needed, or are ‘fixed’, which means that +// they write into a given buffer and produce an error if it's exhausted. +// +// The zero value is a usable Builder that allocates space as needed. +// +// Simple values are marshaled and appended to a Builder using methods on the +// Builder. Length-prefixed values are marshaled by providing a +// BuilderContinuation, which is a function that writes the inner contents of +// the value to a given Builder. See the documentation for BuilderContinuation +// for details. +type Builder struct { + err error + result []byte + fixedSize bool + child *Builder + offset int + pendingLenLen int + pendingIsASN1 bool + inContinuation *bool +} + +// NewBuilder creates a Builder that appends its output to the given buffer. +// Like append(), the slice will be reallocated if its capacity is exceeded. +// Use Bytes to get the final buffer. +func NewBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + } +} + +// NewFixedBuilder creates a Builder that appends its output into the given +// buffer. This builder does not reallocate the output buffer. Writes that +// would exceed the buffer's capacity are treated as an error. +func NewFixedBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + fixedSize: true, + } +} + +// SetError sets the value to be returned as the error from Bytes. Writes +// performed after calling SetError are ignored. +func (b *Builder) SetError(err error) { + b.err = err +} + +// Bytes returns the bytes written by the builder or an error if one has +// occurred during building. +func (b *Builder) Bytes() ([]byte, error) { + if b.err != nil { + return nil, b.err + } + return b.result[b.offset:], nil +} + +// BytesOrPanic returns the bytes written by the builder or panics if an error +// has occurred during building. +func (b *Builder) BytesOrPanic() []byte { + if b.err != nil { + panic(b.err) + } + return b.result[b.offset:] +} + +// AddUint8 appends an 8-bit value to the byte string. +func (b *Builder) AddUint8(v uint8) { + b.add(byte(v)) +} + +// AddUint16 appends a big-endian, 16-bit value to the byte string. +func (b *Builder) AddUint16(v uint16) { + b.add(byte(v>>8), byte(v)) +} + +// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest +// byte of the 32-bit input value is silently truncated. +func (b *Builder) AddUint24(v uint32) { + b.add(byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint32 appends a big-endian, 32-bit value to the byte string. 
+func (b *Builder) AddUint32(v uint32) { + b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint64 appends a big-endian, 64-bit value to the byte string. +func (b *Builder) AddUint64(v uint64) { + b.add(byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddBytes appends a sequence of bytes to the byte string. +func (b *Builder) AddBytes(v []byte) { + b.add(v...) +} + +// BuilderContinuation is a continuation-passing interface for building +// length-prefixed byte sequences. Builder methods for length-prefixed +// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation +// supplied to them. The child builder passed to the continuation can be used +// to build the content of the length-prefixed sequence. For example: +// +// parent := cryptobyte.NewBuilder() +// parent.AddUint8LengthPrefixed(func (child *Builder) { +// child.AddUint8(42) +// child.AddUint8LengthPrefixed(func (grandchild *Builder) { +// grandchild.AddUint8(5) +// }) +// }) +// +// It is an error to write more bytes to the child than allowed by the reserved +// length prefix. After the continuation returns, the child must be considered +// invalid, i.e. users must not store any copies or references of the child +// that outlive the continuation. +// +// If the continuation panics with a value of type BuildError then the inner +// error will be returned as the error from Bytes. If the child panics +// otherwise then Bytes will repanic with the same value. +type BuilderContinuation func(child *Builder) + +// BuildError wraps an error. If a BuilderContinuation panics with this value, +// the panic will be recovered and the inner error will be returned from +// Builder.Bytes. +type BuildError struct { + Err error +} + +// AddUint8LengthPrefixed adds a 8-bit length-prefixed byte sequence. +func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(1, false, f) +} + +// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence. +func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(2, false, f) +} + +// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence. +func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(3, false, f) +} + +// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence. +func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(4, false, f) +} + +func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) { + if !*b.inContinuation { + *b.inContinuation = true + + defer func() { + *b.inContinuation = false + + r := recover() + if r == nil { + return + } + + if buildError, ok := r.(BuildError); ok { + b.err = buildError.Err + } else { + panic(r) + } + }() + } + + f(arg) +} + +func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) { + // Subsequent writes can be ignored if the builder has encountered an error. + if b.err != nil { + return + } + + offset := len(b.result) + b.add(make([]byte, lenLen)...) 
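+	// The lenLen bytes reserved above are zero for now; flushChild writes the
+	// real length once the child's content is complete.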
+ + if b.inContinuation == nil { + b.inContinuation = new(bool) + } + + b.child = &Builder{ + result: b.result, + fixedSize: b.fixedSize, + offset: offset, + pendingLenLen: lenLen, + pendingIsASN1: isASN1, + inContinuation: b.inContinuation, + } + + b.callContinuation(f, b.child) + b.flushChild() + if b.child != nil { + panic("cryptobyte: internal error") + } +} + +func (b *Builder) flushChild() { + if b.child == nil { + return + } + b.child.flushChild() + child := b.child + b.child = nil + + if child.err != nil { + b.err = child.err + return + } + + length := len(child.result) - child.pendingLenLen - child.offset + + if length < 0 { + panic("cryptobyte: internal error") // result unexpectedly shrunk + } + + if child.pendingIsASN1 { + // For ASN.1, we reserved a single byte for the length. If that turned out + // to be incorrect, we have to move the contents along in order to make + // space. + if child.pendingLenLen != 1 { + panic("cryptobyte: internal error") + } + var lenLen, lenByte uint8 + if int64(length) > 0xfffffffe { + b.err = errors.New("pending ASN.1 child too long") + return + } else if length > 0xffffff { + lenLen = 5 + lenByte = 0x80 | 4 + } else if length > 0xffff { + lenLen = 4 + lenByte = 0x80 | 3 + } else if length > 0xff { + lenLen = 3 + lenByte = 0x80 | 2 + } else if length > 0x7f { + lenLen = 2 + lenByte = 0x80 | 1 + } else { + lenLen = 1 + lenByte = uint8(length) + length = 0 + } + + // Insert the initial length byte, make space for successive length bytes, + // and adjust the offset. + child.result[child.offset] = lenByte + extraBytes := int(lenLen - 1) + if extraBytes != 0 { + child.add(make([]byte, extraBytes)...) + childStart := child.offset + child.pendingLenLen + copy(child.result[childStart+extraBytes:], child.result[childStart:]) + } + child.offset++ + child.pendingLenLen = extraBytes + } + + l := length + for i := child.pendingLenLen - 1; i >= 0; i-- { + child.result[child.offset+i] = uint8(l) + l >>= 8 + } + if l != 0 { + b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen) + return + } + + if b.fixedSize && &b.result[0] != &child.result[0] { + panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer") + } + + b.result = child.result +} + +func (b *Builder) add(bytes ...byte) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted write while child is pending") + } + if len(b.result)+len(bytes) < len(bytes) { + b.err = errors.New("cryptobyte: length overflow") + } + if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) { + b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer") + return + } + b.result = append(b.result, bytes...) +} + +// Unwrite rolls back n bytes written directly to the Builder. An attempt by a +// child builder passed to a continuation to unwrite bytes from its parent will +// panic. +func (b *Builder) Unwrite(n int) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted unwrite while child is pending") + } + length := len(b.result) - b.pendingLenLen - b.offset + if length < 0 { + panic("cryptobyte: internal error") + } + if n > length { + panic("cryptobyte: attempted to unwrite more than was written") + } + b.result = b.result[:len(b.result)-n] +} + +// A MarshalingValue marshals itself into a Builder. +type MarshalingValue interface { + // Marshal is called by Builder.AddValue. It receives a pointer to a builder + // to marshal itself into. 
It may return an error that occurred during
+	// marshaling, such as unset or invalid values.
+	Marshal(b *Builder) error
+}
+
+// AddValue calls Marshal on v, passing a pointer to the builder to append to.
+// If Marshal returns an error, it is set on the Builder so that subsequent
+// appends don't have an effect.
+func (b *Builder) AddValue(v MarshalingValue) {
+	err := v.Marshal(b)
+	if err != nil {
+		b.err = err
+	}
+}
diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go
new file mode 100644
index 00000000000..0531a3d6f1a
--- /dev/null
+++ b/vendor/golang.org/x/crypto/cryptobyte/string.go
@@ -0,0 +1,172 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cryptobyte contains types that help with parsing and constructing
+// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage
+// contains useful ASN.1 constants.)
+//
+// The String type is for parsing. It wraps a []byte slice and provides helper
+// functions for consuming structures, value by value.
+//
+// The Builder type is for constructing messages. It provides helper functions
+// for appending values and also for appending length-prefixed submessages –
+// without having to worry about calculating the length prefix ahead of time.
+//
+// See the documentation and examples for the Builder and String types to get
+// started.
+package cryptobyte // import "golang.org/x/crypto/cryptobyte"
+
+// String represents a string of bytes. It provides methods for parsing
+// fixed-length and length-prefixed values from it.
+type String []byte
+
+// read advances a String by n bytes and returns them. If less than n bytes
+// remain, it returns nil.
+func (s *String) read(n int) []byte {
+	if len(*s) < n || n < 0 {
+		return nil
+	}
+	v := (*s)[:n]
+	*s = (*s)[n:]
+	return v
+}
+
+// Skip advances the String by n bytes and reports whether it was successful.
+func (s *String) Skip(n int) bool {
+	return s.read(n) != nil
+}
+
+// ReadUint8 decodes an 8-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint8(out *uint8) bool {
+	v := s.read(1)
+	if v == nil {
+		return false
+	}
+	*out = uint8(v[0])
+	return true
+}
+
+// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint16(out *uint16) bool {
+	v := s.read(2)
+	if v == nil {
+		return false
+	}
+	*out = uint16(v[0])<<8 | uint16(v[1])
+	return true
+}
+
+// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint24(out *uint32) bool {
+	v := s.read(3)
+	if v == nil {
+		return false
+	}
+	*out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2])
+	return true
+}
+
+// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint32(out *uint32) bool {
+	v := s.read(4)
+	if v == nil {
+		return false
+	}
+	*out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3])
+	return true
+}
+
+// ReadUint64 decodes a big-endian, 64-bit value into out and advances over it.
+// It reports whether the read was successful.
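+//
+// A minimal usage sketch, where buf stands for any []byte:
+//
+//	s := cryptobyte.String(buf)
+//	var v uint64
+//	if !s.ReadUint64(&v) {
+//		// buf held fewer than 8 bytes
+//	}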
+func (s *String) ReadUint64(out *uint64) bool { + v := s.read(8) + if v == nil { + return false + } + *out = uint64(v[0])<<56 | uint64(v[1])<<48 | uint64(v[2])<<40 | uint64(v[3])<<32 | uint64(v[4])<<24 | uint64(v[5])<<16 | uint64(v[6])<<8 | uint64(v[7]) + return true +} + +func (s *String) readUnsigned(out *uint32, length int) bool { + v := s.read(length) + if v == nil { + return false + } + var result uint32 + for i := 0; i < length; i++ { + result <<= 8 + result |= uint32(v[i]) + } + *out = result + return true +} + +func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool { + lenBytes := s.read(lenLen) + if lenBytes == nil { + return false + } + var length uint32 + for _, b := range lenBytes { + length = length << 8 + length = length | uint32(b) + } + v := s.read(int(length)) + if v == nil { + return false + } + *outChild = v + return true +} + +// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value +// into out and advances over it. It reports whether the read was successful. +func (s *String) ReadUint8LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(1, out) +} + +// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit +// length-prefixed value into out and advances over it. It reports whether the +// read was successful. +func (s *String) ReadUint16LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(2, out) +} + +// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit +// length-prefixed value into out and advances over it. It reports whether +// the read was successful. +func (s *String) ReadUint24LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(3, out) +} + +// ReadBytes reads n bytes into out and advances over them. It reports +// whether the read was successful. +func (s *String) ReadBytes(out *[]byte, n int) bool { + v := s.read(n) + if v == nil { + return false + } + *out = v + return true +} + +// CopyBytes copies len(out) bytes into out and advances over them. It reports +// whether the copy operation was successful +func (s *String) CopyBytes(out []byte) bool { + n := len(out) + v := s.read(n) + if v == nil { + return false + } + return copy(out, v) == n +} + +// Empty reports whether the string does not contain any bytes. +func (s String) Empty() bool { + return len(s) == 0 +} diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go index 6886dc163cb..46219da2b01 100644 --- a/vendor/golang.org/x/net/http2/hpack/encode.go +++ b/vendor/golang.org/x/net/http2/hpack/encode.go @@ -116,6 +116,11 @@ func (e *Encoder) SetMaxDynamicTableSize(v uint32) { e.dynTab.setMaxSize(v) } +// MaxDynamicTableSize returns the current dynamic header table size. +func (e *Encoder) MaxDynamicTableSize() (v uint32) { + return e.dynTab.maxSize +} + // SetMaxDynamicTableSizeLimit changes the maximum value that can be // specified in SetMaxDynamicTableSize to v. By default, it is set to // 4096, which is the same size of the default dynamic header table diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index d8a17aa9b02..e35a76c07b7 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -98,6 +98,19 @@ type Server struct { // the HTTP/2 spec's recommendations. MaxConcurrentStreams uint32 + // MaxDecoderHeaderTableSize optionally specifies the http2 + // SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. 
It + // informs the remote endpoint of the maximum size of the header compression + // table used to decode header blocks, in octets. If zero, the default value + // of 4096 is used. + MaxDecoderHeaderTableSize uint32 + + // MaxEncoderHeaderTableSize optionally specifies an upper limit for the + // header compression table used for encoding request headers. Received + // SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero, + // the default value of 4096 is used. + MaxEncoderHeaderTableSize uint32 + // MaxReadFrameSize optionally specifies the largest frame // this server is willing to read. A valid value is between // 16k and 16M, inclusive. If zero or otherwise invalid, a @@ -170,6 +183,20 @@ func (s *Server) maxConcurrentStreams() uint32 { return defaultMaxStreams } +func (s *Server) maxDecoderHeaderTableSize() uint32 { + if v := s.MaxDecoderHeaderTableSize; v > 0 { + return v + } + return initialHeaderTableSize +} + +func (s *Server) maxEncoderHeaderTableSize() uint32 { + if v := s.MaxEncoderHeaderTableSize; v > 0 { + return v + } + return initialHeaderTableSize +} + // maxQueuedControlFrames is the maximum number of control frames like // SETTINGS, PING and RST_STREAM that will be queued for writing before // the connection is closed to prevent memory exhaustion attacks. @@ -394,7 +421,6 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { advMaxStreams: s.maxConcurrentStreams(), initialStreamSendWindowSize: initialWindowSize, maxFrameSize: initialMaxFrameSize, - headerTableSize: initialHeaderTableSize, serveG: newGoroutineLock(), pushEnabled: true, sawClientPreface: opts.SawClientPreface, @@ -424,12 +450,13 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { sc.flow.add(initialWindowSize) sc.inflow.add(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) + sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize()) fr := NewFramer(sc.bw, c) if s.CountError != nil { fr.countError = s.CountError } - fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil) fr.MaxHeaderListSize = sc.maxHeaderListSize() fr.SetMaxReadFrameSize(s.maxReadFrameSize()) sc.framer = fr @@ -559,7 +586,6 @@ type serverConn struct { streams map[uint32]*stream initialStreamSendWindowSize int32 maxFrameSize int32 - headerTableSize uint32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case writingFrame bool // started writing a frame (on serve goroutine or separate) @@ -864,6 +890,7 @@ func (sc *serverConn) serve() { {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, {SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()}, {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, }, }) @@ -1661,7 +1688,6 @@ func (sc *serverConn) processSetting(s Setting) error { } switch s.ID { case SettingHeaderTableSize: - sc.headerTableSize = s.Val sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) case SettingEnablePush: sc.pushEnabled = s.Val != 0 diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 46dda4dc316..30f706e6cb8 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -118,6 +118,28 @@ type Transport struct { // to mean no limit. 
MaxHeaderListSize uint32 + // MaxReadFrameSize is the http2 SETTINGS_MAX_FRAME_SIZE to send in the + // initial settings frame. It is the size in bytes of the largest frame + // payload that the sender is willing to receive. If 0, no setting is + // sent, and the value is provided by the peer, which should be 16384 + // according to the spec: + // https://datatracker.ietf.org/doc/html/rfc7540#section-6.5.2. + // Values are bounded in the range 16k to 16M. + MaxReadFrameSize uint32 + + // MaxDecoderHeaderTableSize optionally specifies the http2 + // SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It + // informs the remote endpoint of the maximum size of the header compression + // table used to decode header blocks, in octets. If zero, the default value + // of 4096 is used. + MaxDecoderHeaderTableSize uint32 + + // MaxEncoderHeaderTableSize optionally specifies an upper limit for the + // header compression table used for encoding request headers. Received + // SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero, + // the default value of 4096 is used. + MaxEncoderHeaderTableSize uint32 + // StrictMaxConcurrentStreams controls whether the server's // SETTINGS_MAX_CONCURRENT_STREAMS should be respected // globally. If false, new TCP connections are created to the @@ -171,6 +193,19 @@ func (t *Transport) maxHeaderListSize() uint32 { return t.MaxHeaderListSize } +func (t *Transport) maxFrameReadSize() uint32 { + if t.MaxReadFrameSize == 0 { + return 0 // use the default provided by the peer + } + if t.MaxReadFrameSize < minMaxFrameSize { + return minMaxFrameSize + } + if t.MaxReadFrameSize > maxFrameSize { + return maxFrameSize + } + return t.MaxReadFrameSize +} + func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } @@ -293,10 +328,11 @@ type ClientConn struct { lastActive time.Time lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - peerMaxHeaderListSize uint64 - initialWindowSize uint32 + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + peerMaxHeaderTableSize uint32 + initialWindowSize uint32 // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. 
@@ -681,6 +717,20 @@ func (t *Transport) expectContinueTimeout() time.Duration { return t.t1.ExpectContinueTimeout } +func (t *Transport) maxDecoderHeaderTableSize() uint32 { + if v := t.MaxDecoderHeaderTableSize; v > 0 { + return v + } + return initialHeaderTableSize +} + +func (t *Transport) maxEncoderHeaderTableSize() uint32 { + if v := t.MaxEncoderHeaderTableSize; v > 0 { + return v + } + return initialHeaderTableSize +} + func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { return t.newClientConn(c, t.disableKeepAlives()) } @@ -721,15 +771,19 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro }) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) + if t.maxFrameReadSize() != 0 { + cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize()) + } if t.CountError != nil { cc.fr.countError = t.CountError } - cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + maxHeaderTableSize := t.maxDecoderHeaderTableSize() + cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() - // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on - // henc in response to SETTINGS frames? cc.henc = hpack.NewEncoder(&cc.hbuf) + cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) + cc.peerMaxHeaderTableSize = initialHeaderTableSize if t.AllowHTTP { cc.nextStreamID = 3 @@ -744,9 +798,15 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro {ID: SettingEnablePush, Val: 0}, {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, } + if max := t.maxFrameReadSize(); max != 0 { + initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max}) + } if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } + if maxHeaderTableSize != initialHeaderTableSize { + initialSettings = append(initialSettings, Setting{ID: SettingHeaderTableSize, Val: maxHeaderTableSize}) + } cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) @@ -2773,8 +2833,10 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { cc.cond.Broadcast() cc.initialWindowSize = s.Val + case SettingHeaderTableSize: + cc.henc.SetMaxDynamicTableSize(s.Val) + cc.peerMaxHeaderTableSize = s.Val default: - // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. cc.vlogf("Unhandled Setting: %v", s) } return nil diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go index e917195d53a..2bf3202b290 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go @@ -62,6 +62,13 @@ const ( // The AWS authorization header name for the auto-generated date. awsDateHeader = "x-amz-date" + // Supported AWS configuration environment variables. 
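+	// These are the standard variables also honored by the AWS CLI and SDKs.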
+ awsAccessKeyId = "AWS_ACCESS_KEY_ID" + awsDefaultRegion = "AWS_DEFAULT_REGION" + awsRegion = "AWS_REGION" + awsSecretAccessKey = "AWS_SECRET_ACCESS_KEY" + awsSessionToken = "AWS_SESSION_TOKEN" + awsTimeFormatLong = "20060102T150405Z" awsTimeFormatShort = "20060102" ) @@ -267,6 +274,49 @@ type awsRequest struct { Headers []awsRequestHeader `json:"headers"` } +func (cs awsCredentialSource) validateMetadataServers() error { + if err := cs.validateMetadataServer(cs.RegionURL, "region_url"); err != nil { + return err + } + if err := cs.validateMetadataServer(cs.CredVerificationURL, "url"); err != nil { + return err + } + return cs.validateMetadataServer(cs.IMDSv2SessionTokenURL, "imdsv2_session_token_url") +} + +var validHostnames []string = []string{"169.254.169.254", "fd00:ec2::254"} + +func (cs awsCredentialSource) isValidMetadataServer(metadataUrl string) bool { + if metadataUrl == "" { + // Zero value means use default, which is valid. + return true + } + + u, err := url.Parse(metadataUrl) + if err != nil { + // Unparseable URL means invalid + return false + } + + for _, validHostname := range validHostnames { + if u.Hostname() == validHostname { + // If it's one of the valid hostnames, everything is good + return true + } + } + + // hostname not found in our allowlist, so not valid + return false +} + +func (cs awsCredentialSource) validateMetadataServer(metadataUrl, urlName string) error { + if !cs.isValidMetadataServer(metadataUrl) { + return fmt.Errorf("oauth2/google: invalid hostname %s for %s", metadataUrl, urlName) + } + + return nil +} + func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, error) { if cs.client == nil { cs.client = oauth2.NewClient(cs.ctx, nil) @@ -274,16 +324,33 @@ func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, erro return cs.client.Do(req.WithContext(cs.ctx)) } +func canRetrieveRegionFromEnvironment() bool { + // The AWS region can be provided through AWS_REGION or AWS_DEFAULT_REGION. Only one is + // required. + return getenv(awsRegion) != "" || getenv(awsDefaultRegion) != "" +} + +func canRetrieveSecurityCredentialFromEnvironment() bool { + // Check if both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are available. 
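+	// AWS_SESSION_TOKEN is optional; it is only set for temporary credentials.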
+ return getenv(awsAccessKeyId) != "" && getenv(awsSecretAccessKey) != "" +} + +func shouldUseMetadataServer() bool { + return !canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment() +} + func (cs awsCredentialSource) subjectToken() (string, error) { if cs.requestSigner == nil { - awsSessionToken, err := cs.getAWSSessionToken() - if err != nil { - return "", err - } - headers := make(map[string]string) - if awsSessionToken != "" { - headers[awsIMDSv2SessionTokenHeader] = awsSessionToken + if shouldUseMetadataServer() { + awsSessionToken, err := cs.getAWSSessionToken() + if err != nil { + return "", err + } + + if awsSessionToken != "" { + headers[awsIMDSv2SessionTokenHeader] = awsSessionToken + } } awsSecurityCredentials, err := cs.getSecurityCredentials(headers) @@ -389,11 +456,11 @@ func (cs *awsCredentialSource) getAWSSessionToken() (string, error) { } func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, error) { - if envAwsRegion := getenv("AWS_REGION"); envAwsRegion != "" { - return envAwsRegion, nil - } - if envAwsRegion := getenv("AWS_DEFAULT_REGION"); envAwsRegion != "" { - return envAwsRegion, nil + if canRetrieveRegionFromEnvironment() { + if envAwsRegion := getenv(awsRegion); envAwsRegion != "" { + return envAwsRegion, nil + } + return getenv("AWS_DEFAULT_REGION"), nil } if cs.RegionURL == "" { @@ -434,14 +501,12 @@ func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, err } func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) (result awsSecurityCredentials, err error) { - if accessKeyID := getenv("AWS_ACCESS_KEY_ID"); accessKeyID != "" { - if secretAccessKey := getenv("AWS_SECRET_ACCESS_KEY"); secretAccessKey != "" { - return awsSecurityCredentials{ - AccessKeyID: accessKeyID, - SecretAccessKey: secretAccessKey, - SecurityToken: getenv("AWS_SESSION_TOKEN"), - }, nil - } + if canRetrieveSecurityCredentialFromEnvironment() { + return awsSecurityCredentials{ + AccessKeyID: getenv(awsAccessKeyId), + SecretAccessKey: getenv(awsSecretAccessKey), + SecurityToken: getenv(awsSessionToken), + }, nil } roleName, err := cs.getMetadataRoleName(headers) diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go index 9fc35535e7f..3eab8df7ced 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go @@ -213,6 +213,10 @@ func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { awsCredSource.IMDSv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL } + if err := awsCredSource.validateMetadataServers(); err != nil { + return nil, err + } + return awsCredSource, nil } } else if c.CredentialSource.File != "" { diff --git a/vendor/golang.org/x/sync/singleflight/singleflight.go b/vendor/golang.org/x/sync/singleflight/singleflight.go new file mode 100644 index 00000000000..8473fb7922c --- /dev/null +++ b/vendor/golang.org/x/sync/singleflight/singleflight.go @@ -0,0 +1,205 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. 
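+//
+// Concurrent calls to Do with the same key share a single execution and
+// receive the same result. A typical use, where fetch stands for any
+// expensive lookup:
+//
+//	var g singleflight.Group
+//	v, err, shared := g.Do("key", func() (interface{}, error) {
+//		return fetch("key")
+//	})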
+package singleflight // import "golang.org/x/sync/singleflight" + +import ( + "bytes" + "errors" + "fmt" + "runtime" + "runtime/debug" + "sync" +) + +// errGoexit indicates the runtime.Goexit was called in +// the user given function. +var errGoexit = errors.New("runtime.Goexit was called") + +// A panicError is an arbitrary value recovered from a panic +// with the stack trace during the execution of given function. +type panicError struct { + value interface{} + stack []byte +} + +// Error implements error interface. +func (p *panicError) Error() string { + return fmt.Sprintf("%v\n\n%s", p.value, p.stack) +} + +func newPanicError(v interface{}) error { + stack := debug.Stack() + + // The first line of the stack trace is of the form "goroutine N [status]:" + // but by the time the panic reaches Do the goroutine may no longer exist + // and its status will have changed. Trim out the misleading line. + if line := bytes.IndexByte(stack[:], '\n'); line >= 0 { + stack = stack[line+1:] + } + return &panicError{value: v, stack: stack} +} + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + + if e, ok := c.err.(*panicError); ok { + panic(e) + } else if c.err == errGoexit { + runtime.Goexit() + } + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +// +// The returned channel will not be closed. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. 
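The mechanics of doCall follow; before that, a hedged usage sketch of the Do semantics documented above (assuming the vendored package is importable under its canonical path golang.org/x/sync/singleflight):

package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/singleflight"
)

func main() {
	var g singleflight.Group
	var wg sync.WaitGroup

	// Ten goroutines request the same key; the function runs once per
	// batch of overlapping callers, and duplicates observe shared == true
	// along with the original call's value.
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			v, err, shared := g.Do("config", func() (interface{}, error) {
				return "loaded-once", nil // stand-in for an expensive fetch
			})
			fmt.Println(v, err, shared)
		}()
	}
	wg.Wait()
}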
+func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + normalReturn := false + recovered := false + + // use double-defer to distinguish panic from runtime.Goexit, + // more details see https://golang.org/cl/134395 + defer func() { + // the given function invoked runtime.Goexit + if !normalReturn && !recovered { + c.err = errGoexit + } + + g.mu.Lock() + defer g.mu.Unlock() + c.wg.Done() + if g.m[key] == c { + delete(g.m, key) + } + + if e, ok := c.err.(*panicError); ok { + // In order to prevent the waiting channels from being blocked forever, + // needs to ensure that this panic cannot be recovered. + if len(c.chans) > 0 { + go panic(e) + select {} // Keep this goroutine around so that it will appear in the crash dump. + } else { + panic(e) + } + } else if c.err == errGoexit { + // Already in the process of goexit, no need to call again + } else { + // Normal return + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + } + }() + + func() { + defer func() { + if !normalReturn { + // Ideally, we would wait to take a stack trace until we've determined + // whether this is a panic or a runtime.Goexit. + // + // Unfortunately, the only way we can distinguish the two is to see + // whether the recover stopped the goroutine from terminating, and by + // the time we know that, the part of the stack trace relevant to the + // panic has been discarded. + if r := recover(); r != nil { + c.err = newPanicError(r) + } + } + }() + + c.val, c.err = fn() + normalReturn = true + }() + + if !normalReturn { + recovered = true + } +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go index 1e7a9ada0b0..46c5b525e7b 100644 --- a/vendor/golang.org/x/sys/execabs/execabs_go119.go +++ b/vendor/golang.org/x/sys/execabs/execabs_go119.go @@ -7,9 +7,11 @@ package execabs -import "strings" +import ( + "errors" + "os/exec" +) func isGo119ErrDot(err error) bool { - // TODO: return errors.Is(err, exec.ErrDot) - return strings.Contains(err.Error(), "current directory") + return errors.Is(err, exec.ErrDot) } diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 7a6ba43a7ee..a49853e9d3a 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -367,6 +367,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys IsWindowUnicode(hwnd HWND) (isUnicode bool) = user32.IsWindowUnicode //sys IsWindowVisible(hwnd HWND) (isVisible bool) = user32.IsWindowVisible //sys GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) = user32.GetGUIThreadInfo +//sys GetLargePageMinimum() (size uintptr) // Volume Management Functions //sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 96ba8559c37..ac60052e44a 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -252,6 +252,7 @@ var ( procGetFileType = modkernel32.NewProc("GetFileType") procGetFinalPathNameByHandleW = 
modkernel32.NewProc("GetFinalPathNameByHandleW") procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") + procGetLargePageMinimum = modkernel32.NewProc("GetLargePageMinimum") procGetLastError = modkernel32.NewProc("GetLastError") procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") @@ -2180,6 +2181,12 @@ func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) ( return } +func GetLargePageMinimum() (size uintptr) { + r0, _, _ := syscall.Syscall(procGetLargePageMinimum.Addr(), 0, 0, 0, 0) + size = uintptr(r0) + return +} + func GetLastError() (lasterr error) { r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) if r0 != 0 { diff --git a/vendor/golang.org/x/text/unicode/bidi/trieval.go b/vendor/golang.org/x/text/unicode/bidi/trieval.go index 4c459c4b72e..6a796e2214c 100644 --- a/vendor/golang.org/x/text/unicode/bidi/trieval.go +++ b/vendor/golang.org/x/text/unicode/bidi/trieval.go @@ -37,18 +37,6 @@ const ( unknownClass = ^Class(0) ) -var controlToClass = map[rune]Class{ - 0x202D: LRO, // LeftToRightOverride, - 0x202E: RLO, // RightToLeftOverride, - 0x202A: LRE, // LeftToRightEmbedding, - 0x202B: RLE, // RightToLeftEmbedding, - 0x202C: PDF, // PopDirectionalFormat, - 0x2066: LRI, // LeftToRightIsolate, - 0x2067: RLI, // RightToLeftIsolate, - 0x2068: FSI, // FirstStrongIsolate, - 0x2069: PDI, // PopDirectionalIsolate, -} - // A trie entry has the following bits: // 7..5 XOR mask for brackets // 4 1: Bracket open, 0: Bracket close diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go new file mode 100644 index 00000000000..65b125abd2c --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -0,0 +1,480 @@ +// Copyright 2011 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package googleapi contains the common code shared by all Google API +// libraries. +package googleapi // import "google.golang.org/api/googleapi" + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "google.golang.org/api/internal/third_party/uritemplates" +) + +// ContentTyper is an interface for Readers which know (or would like +// to override) their Content-Type. If a media body doesn't implement +// ContentTyper, the type is sniffed from the content using +// http.DetectContentType. +type ContentTyper interface { + ContentType() string +} + +// A SizeReaderAt is a ReaderAt with a Size method. +// An io.SectionReader implements SizeReaderAt. +type SizeReaderAt interface { + io.ReaderAt + Size() int64 +} + +// ServerResponse is embedded in each Do response and +// provides the HTTP status code and header sent by the server. +type ServerResponse struct { + // HTTPStatusCode is the server's response status code. When using a + // resource method's Do call, this will always be in the 2xx range. + HTTPStatusCode int + // Header contains the response header fields from the server. + Header http.Header +} + +const ( + // Version defines the gax version being used. This is typically sent + // in an HTTP header to services. + Version = "0.5" + + // UserAgent is the header string used to identify this package. 
+ UserAgent = "google-api-go-client/" + Version + + // DefaultUploadChunkSize is the default chunk size to use for resumable + // uploads if not specified by the user. + DefaultUploadChunkSize = 16 * 1024 * 1024 + + // MinUploadChunkSize is the minimum chunk size that can be used for + // resumable uploads. All user-specified chunk sizes must be multiple of + // this value. + MinUploadChunkSize = 256 * 1024 +) + +// Error contains an error response from the server. +type Error struct { + // Code is the HTTP response status code and will always be populated. + Code int `json:"code"` + // Message is the server response message and is only populated when + // explicitly referenced by the JSON server response. + Message string `json:"message"` + // Details provide more context to an error. + Details []interface{} `json:"details"` + // Body is the raw response returned by the server. + // It is often but not always JSON, depending on how the request fails. + Body string + // Header contains the response header fields from the server. + Header http.Header + + Errors []ErrorItem + // err is typically a wrapped apierror.APIError, see + // google-api-go-client/internal/gensupport/error.go. + err error +} + +// ErrorItem is a detailed error code & message from the Google API frontend. +type ErrorItem struct { + // Reason is the typed error code. For example: "some_example". + Reason string `json:"reason"` + // Message is the human-readable description of the error. + Message string `json:"message"` +} + +func (e *Error) Error() string { + if len(e.Errors) == 0 && e.Message == "" { + return fmt.Sprintf("googleapi: got HTTP response code %d with body: %v", e.Code, e.Body) + } + var buf bytes.Buffer + fmt.Fprintf(&buf, "googleapi: Error %d: ", e.Code) + if e.Message != "" { + fmt.Fprintf(&buf, "%s", e.Message) + } + if len(e.Details) > 0 { + var detailBuf bytes.Buffer + enc := json.NewEncoder(&detailBuf) + enc.SetIndent("", " ") + if err := enc.Encode(e.Details); err == nil { + fmt.Fprint(&buf, "\nDetails:") + fmt.Fprintf(&buf, "\n%s", detailBuf.String()) + + } + } + if len(e.Errors) == 0 { + return strings.TrimSpace(buf.String()) + } + if len(e.Errors) == 1 && e.Errors[0].Message == e.Message { + fmt.Fprintf(&buf, ", %s", e.Errors[0].Reason) + return buf.String() + } + fmt.Fprintln(&buf, "\nMore details:") + for _, v := range e.Errors { + fmt.Fprintf(&buf, "Reason: %s, Message: %s\n", v.Reason, v.Message) + } + return buf.String() +} + +// Wrap allows an existing Error to wrap another error. See also [Error.Unwrap]. +func (e *Error) Wrap(err error) { + e.err = err +} + +func (e *Error) Unwrap() error { + return e.err +} + +type errorReply struct { + Error *Error `json:"error"` +} + +// CheckResponse returns an error (of type *Error) if the response +// status code is not 2xx. +func CheckResponse(res *http.Response) error { + if res.StatusCode >= 200 && res.StatusCode <= 299 { + return nil + } + slurp, err := ioutil.ReadAll(res.Body) + if err == nil { + jerr := new(errorReply) + err = json.Unmarshal(slurp, jerr) + if err == nil && jerr.Error != nil { + if jerr.Error.Code == 0 { + jerr.Error.Code = res.StatusCode + } + jerr.Error.Body = string(slurp) + jerr.Error.Header = res.Header + return jerr.Error + } + } + return &Error{ + Code: res.StatusCode, + Body: string(slurp), + Header: res.Header, + } +} + +// IsNotModified reports whether err is the result of the +// server replying with http.StatusNotModified. 
+// Such error values are sometimes returned by "Do" methods +// on calls when If-None-Match is used. +func IsNotModified(err error) bool { + if err == nil { + return false + } + ae, ok := err.(*Error) + return ok && ae.Code == http.StatusNotModified +} + +// CheckMediaResponse returns an error (of type *Error) if the response +// status code is not 2xx. Unlike CheckResponse it does not assume the +// body is a JSON error document. +// It is the caller's responsibility to close res.Body. +func CheckMediaResponse(res *http.Response) error { + if res.StatusCode >= 200 && res.StatusCode <= 299 { + return nil + } + slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20)) + return &Error{ + Code: res.StatusCode, + Body: string(slurp), + } +} + +// MarshalStyle defines whether to marshal JSON with a {"data": ...} wrapper. +type MarshalStyle bool + +// WithDataWrapper marshals JSON with a {"data": ...} wrapper. +var WithDataWrapper = MarshalStyle(true) + +// WithoutDataWrapper marshals JSON without a {"data": ...} wrapper. +var WithoutDataWrapper = MarshalStyle(false) + +func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) { + buf := new(bytes.Buffer) + if wrap { + buf.Write([]byte(`{"data": `)) + } + err := json.NewEncoder(buf).Encode(v) + if err != nil { + return nil, err + } + if wrap { + buf.Write([]byte(`}`)) + } + return buf, nil +} + +// ProgressUpdater is a function that is called upon every progress update of a resumable upload. +// This is the only part of a resumable upload (from googleapi) that is usable by the developer. +// The remaining usable pieces of resumable uploads is exposed in each auto-generated API. +type ProgressUpdater func(current, total int64) + +// MediaOption defines the interface for setting media options. +type MediaOption interface { + setOptions(o *MediaOptions) +} + +type contentTypeOption string + +func (ct contentTypeOption) setOptions(o *MediaOptions) { + o.ContentType = string(ct) + if o.ContentType == "" { + o.ForceEmptyContentType = true + } +} + +// ContentType returns a MediaOption which sets the Content-Type header for media uploads. +// If ctype is empty, the Content-Type header will be omitted. +func ContentType(ctype string) MediaOption { + return contentTypeOption(ctype) +} + +type chunkSizeOption int + +func (cs chunkSizeOption) setOptions(o *MediaOptions) { + size := int(cs) + if size%MinUploadChunkSize != 0 { + size += MinUploadChunkSize - (size % MinUploadChunkSize) + } + o.ChunkSize = size +} + +// ChunkSize returns a MediaOption which sets the chunk size for media uploads. +// size will be rounded up to the nearest multiple of 256K. +// Media which contains fewer than size bytes will be uploaded in a single request. +// Media which contains size bytes or more will be uploaded in separate chunks. +// If size is zero, media will be uploaded in a single request. +func ChunkSize(size int) MediaOption { + return chunkSizeOption(size) +} + +type chunkRetryDeadlineOption time.Duration + +func (cd chunkRetryDeadlineOption) setOptions(o *MediaOptions) { + o.ChunkRetryDeadline = time.Duration(cd) +} + +// ChunkRetryDeadline returns a MediaOption which sets a per-chunk retry +// deadline. If a single chunk has been attempting to upload for longer than +// this time and the request fails, it will no longer be retried, and the error +// will be returned to the caller. +// This is only applicable for files which are large enough to require +// a multi-chunk resumable upload. +// The default value is 32s. 
+// To set a deadline on the entire upload, use context timeout or cancellation. +func ChunkRetryDeadline(deadline time.Duration) MediaOption { + return chunkRetryDeadlineOption(deadline) +} + +// MediaOptions stores options for customizing media upload. It is not used by developers directly. +type MediaOptions struct { + ContentType string + ForceEmptyContentType bool + ChunkSize int + ChunkRetryDeadline time.Duration +} + +// ProcessMediaOptions stores options from opts in a MediaOptions. +// It is not used by developers directly. +func ProcessMediaOptions(opts []MediaOption) *MediaOptions { + mo := &MediaOptions{ChunkSize: DefaultUploadChunkSize} + for _, o := range opts { + o.setOptions(mo) + } + return mo +} + +// ResolveRelative resolves relatives such as "http://www.golang.org/" and +// "topics/myproject/mytopic" into a single string, such as +// "http://www.golang.org/topics/myproject/mytopic". It strips all parent +// references (e.g. ../..) as well as anything after the host +// (e.g. /bar/gaz gets stripped out of foo.com/bar/gaz). +// +// ResolveRelative panics if either basestr or relstr is not able to be parsed. +func ResolveRelative(basestr, relstr string) string { + u, err := url.Parse(basestr) + if err != nil { + panic(fmt.Sprintf("failed to parse %q", basestr)) + } + afterColonPath := "" + if i := strings.IndexRune(relstr, ':'); i > 0 { + afterColonPath = relstr[i+1:] + relstr = relstr[:i] + } + rel, err := url.Parse(relstr) + if err != nil { + panic(fmt.Sprintf("failed to parse %q", relstr)) + } + u = u.ResolveReference(rel) + us := u.String() + if afterColonPath != "" { + us = fmt.Sprintf("%s:%s", us, afterColonPath) + } + us = strings.Replace(us, "%7B", "{", -1) + us = strings.Replace(us, "%7D", "}", -1) + us = strings.Replace(us, "%2A", "*", -1) + return us +} + +// Expand subsitutes any {encoded} strings in the URL passed in using +// the map supplied. +// +// This calls SetOpaque to avoid encoding of the parameters in the URL path. +func Expand(u *url.URL, expansions map[string]string) { + escaped, unescaped, err := uritemplates.Expand(u.Path, expansions) + if err == nil { + u.Path = unescaped + u.RawPath = escaped + } +} + +// CloseBody is used to close res.Body. +// Prior to calling Close, it also tries to Read a small amount to see an EOF. +// Not seeing an EOF can prevent HTTP Transports from reusing connections. +func CloseBody(res *http.Response) { + if res == nil || res.Body == nil { + return + } + // Justification for 3 byte reads: two for up to "\r\n" after + // a JSON/XML document, and then 1 to see EOF if we haven't yet. + // TODO(bradfitz): detect Go 1.3+ and skip these reads. + // See https://codereview.appspot.com/58240043 + // and https://codereview.appspot.com/49570044 + buf := make([]byte, 1) + for i := 0; i < 3; i++ { + _, err := res.Body.Read(buf) + if err != nil { + break + } + } + res.Body.Close() + +} + +// VariantType returns the type name of the given variant. +// If the map doesn't contain the named key or the value is not a []interface{}, "" is returned. +// This is used to support "variant" APIs that can return one of a number of different types. +func VariantType(t map[string]interface{}) string { + s, _ := t["type"].(string) + return s +} + +// ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'. +// This is used to support "variant" APIs that can return one of a number of different types. +// It reports whether the conversion was successful. 
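One detail above that is easy to misread: ChunkSize does not reject misaligned sizes, it silently rounds them up to the next multiple of MinUploadChunkSize (256 KiB). A standalone sketch of that arithmetic:

package main

import "fmt"

const minUploadChunkSize = 256 * 1024 // mirrors googleapi.MinUploadChunkSize

// roundChunk applies the same rounding chunkSizeOption.setOptions performs.
func roundChunk(size int) int {
	if size%minUploadChunkSize != 0 {
		size += minUploadChunkSize - (size % minUploadChunkSize)
	}
	return size
}

func main() {
	fmt.Println(roundChunk(1))      // 262144
	fmt.Println(roundChunk(262144)) // 262144 (already aligned)
	fmt.Println(roundChunk(300000)) // 524288
}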
+func ConvertVariant(v map[string]interface{}, dst interface{}) bool { + var buf bytes.Buffer + err := json.NewEncoder(&buf).Encode(v) + if err != nil { + return false + } + return json.Unmarshal(buf.Bytes(), dst) == nil +} + +// A Field names a field to be retrieved with a partial response. +// https://cloud.google.com/storage/docs/json_api/v1/how-tos/performance +// +// Partial responses can dramatically reduce the amount of data that must be sent to your application. +// In order to request partial responses, you can specify the full list of fields +// that your application needs by adding the Fields option to your request. +// +// Field strings use camelCase with leading lower-case characters to identify fields within the response. +// +// For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields, +// you could request just those fields like this: +// +// svc.Events.List().Fields("nextPageToken", "items/id").Do() +// +// or if you were also interested in each Item's "Updated" field, you can combine them like this: +// +// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() +// +// Another way to find field names is through the Google API explorer: +// https://developers.google.com/apis-explorer/#p/ +type Field string + +// CombineFields combines fields into a single string. +func CombineFields(s []Field) string { + r := make([]string, len(s)) + for i, v := range s { + r[i] = string(v) + } + return strings.Join(r, ",") +} + +// A CallOption is an optional argument to an API call. +// It should be treated as an opaque value by users of Google APIs. +// +// A CallOption is something that configures an API call in a way that is +// not specific to that API; for instance, controlling the quota user for +// an API call is common across many APIs, and is thus a CallOption. +type CallOption interface { + Get() (key, value string) +} + +// A MultiCallOption is an option argument to an API call and can be passed +// anywhere a CallOption is accepted. It additionally supports returning a slice +// of values for a given key. +type MultiCallOption interface { + CallOption + GetMulti() (key string, value []string) +} + +// QuotaUser returns a CallOption that will set the quota user for a call. +// The quota user can be used by server-side applications to control accounting. +// It can be an arbitrary string up to 40 characters, and will override UserIP +// if both are provided. +func QuotaUser(u string) CallOption { return quotaUser(u) } + +type quotaUser string + +func (q quotaUser) Get() (string, string) { return "quotaUser", string(q) } + +// UserIP returns a CallOption that will set the "userIp" parameter of a call. +// This should be the IP address of the originating request. +func UserIP(ip string) CallOption { return userIP(ip) } + +type userIP string + +func (i userIP) Get() (string, string) { return "userIp", string(i) } + +// Trace returns a CallOption that enables diagnostic tracing for a call. +// traceToken is an ID supplied by Google support. +func Trace(traceToken string) CallOption { return traceTok(traceToken) } + +type traceTok string + +func (t traceTok) Get() (string, string) { return "trace", "token:" + string(t) } + +type queryParameter struct { + key string + values []string +} + +// QueryParameter allows setting the value(s) of an arbitrary key. 
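How a caller and the generated plumbing meet: a sketch of collecting CallOptions into query parameters (the loop is illustrative; each generated API performs the equivalent internally when building request URLs):

package main

import (
	"fmt"
	"net/url"

	"google.golang.org/api/googleapi"
)

func main() {
	opts := []googleapi.CallOption{
		googleapi.QuotaUser("tenant-42"),
		googleapi.Trace("abc123"),
		googleapi.QueryParameter("fields", "name", "size"),
	}

	q := url.Values{}
	for _, o := range opts {
		// MultiCallOptions (e.g. QueryParameter) carry several values for
		// one key; plain CallOptions carry exactly one key/value pair.
		if m, ok := o.(googleapi.MultiCallOption); ok {
			k, vs := m.GetMulti()
			q[k] = append(q[k], vs...)
			continue
		}
		k, v := o.Get()
		q.Set(k, v)
	}
	fmt.Println(q.Encode()) // fields=name&fields=size&quotaUser=tenant-42&trace=token%3Aabc123
}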
+func QueryParameter(key string, values ...string) CallOption { + return queryParameter{key: key, values: append([]string{}, values...)} +} + +// Get will never actually be called -- GetMulti will. +func (q queryParameter) Get() (string, string) { + return "", "" +} + +// GetMulti returns the key and values values associated to that key. +func (q queryParameter) GetMulti() (string, []string) { + return q.key, q.values +} + +// TODO: Fields too diff --git a/vendor/google.golang.org/api/googleapi/types.go b/vendor/google.golang.org/api/googleapi/types.go new file mode 100644 index 00000000000..fabf74d50d0 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/types.go @@ -0,0 +1,202 @@ +// Copyright 2013 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package googleapi + +import ( + "encoding/json" + "errors" + "strconv" +) + +// Int64s is a slice of int64s that marshal as quoted strings in JSON. +type Int64s []int64 + +func (q *Int64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return err + } + *q = append(*q, int64(v)) + } + return nil +} + +// Int32s is a slice of int32s that marshal as quoted strings in JSON. +type Int32s []int32 + +func (q *Int32s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return err + } + *q = append(*q, int32(v)) + } + return nil +} + +// Uint64s is a slice of uint64s that marshal as quoted strings in JSON. +type Uint64s []uint64 + +func (q *Uint64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return err + } + *q = append(*q, uint64(v)) + } + return nil +} + +// Uint32s is a slice of uint32s that marshal as quoted strings in JSON. +type Uint32s []uint32 + +func (q *Uint32s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return err + } + *q = append(*q, uint32(v)) + } + return nil +} + +// Float64s is a slice of float64s that marshal as quoted strings in JSON. 
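A round-trip sketch of the quoted-string JSON encoding these numeric slice types implement; quoting keeps 64-bit values exact for JavaScript consumers, which otherwise parse JSON numbers as float64:

package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/api/googleapi"
)

func main() {
	ids := googleapi.Int64s{1, 9007199254740993} // beyond float64's exact integer range
	b, err := json.Marshal(ids)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // ["1","9007199254740993"]

	var back googleapi.Int64s
	if err := json.Unmarshal(b, &back); err != nil {
		panic(err)
	}
	fmt.Println(back) // [1 9007199254740993]
}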
+type Float64s []float64 + +func (q *Float64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseFloat(s, 64) + if err != nil { + return err + } + *q = append(*q, float64(v)) + } + return nil +} + +func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) { + dst := make([]byte, 0, 2+n*10) // somewhat arbitrary + dst = append(dst, '[') + for i := 0; i < n; i++ { + if i > 0 { + dst = append(dst, ',') + } + dst = append(dst, '"') + dst = fn(dst, i) + dst = append(dst, '"') + } + dst = append(dst, ']') + return dst, nil +} + +func (q Int64s) MarshalJSON() ([]byte, error) { + return quotedList(len(q), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, q[i], 10) + }) +} + +func (q Int32s) MarshalJSON() ([]byte, error) { + return quotedList(len(q), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, int64(q[i]), 10) + }) +} + +func (q Uint64s) MarshalJSON() ([]byte, error) { + return quotedList(len(q), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, q[i], 10) + }) +} + +func (q Uint32s) MarshalJSON() ([]byte, error) { + return quotedList(len(q), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, uint64(q[i]), 10) + }) +} + +func (q Float64s) MarshalJSON() ([]byte, error) { + return quotedList(len(q), func(dst []byte, i int) []byte { + return strconv.AppendFloat(dst, q[i], 'g', -1, 64) + }) +} + +// RawMessage is a raw encoded JSON value. +// It is identical to json.RawMessage, except it does not suffer from +// https://golang.org/issue/14493. +type RawMessage []byte + +// MarshalJSON returns m. +func (m RawMessage) MarshalJSON() ([]byte, error) { + return m, nil +} + +// UnmarshalJSON sets *m to a copy of data. +func (m *RawMessage) UnmarshalJSON(data []byte) error { + if m == nil { + return errors.New("googleapi.RawMessage: UnmarshalJSON on nil pointer") + } + *m = append((*m)[:0], data...) + return nil +} + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { return &v } + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { return &v } diff --git a/vendor/google.golang.org/api/internal/conn_pool.go b/vendor/google.golang.org/api/internal/conn_pool.go new file mode 100644 index 00000000000..fedcce15b46 --- /dev/null +++ b/vendor/google.golang.org/api/internal/conn_pool.go @@ -0,0 +1,30 @@ +// Copyright 2020 Google LLC. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "google.golang.org/grpc" +) + +// ConnPool is a pool of grpc.ClientConns. +type ConnPool interface { + // Conn returns a ClientConn from the pool. + // + // Conns aren't returned to the pool. + Conn() *grpc.ClientConn + + // Num returns the number of connections in the pool. + // + // It will always return the same value. + Num() int + + // Close closes every ClientConn in the pool. + // + // The error returned by Close may be a single error or multiple errors. + Close() error + + // ConnPool implements grpc.ClientConnInterface to enable it to be used directly with generated proto stubs. + grpc.ClientConnInterface +} diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go new file mode 100644 index 00000000000..32d52413b30 --- /dev/null +++ b/vendor/google.golang.org/api/internal/creds.go @@ -0,0 +1,159 @@ +// Copyright 2017 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + + "golang.org/x/oauth2" + "google.golang.org/api/internal/impersonate" + + "golang.org/x/oauth2/google" +) + +// Creds returns credential information obtained from DialSettings, or if none, then +// it returns default credential information. +func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { + creds, err := baseCreds(ctx, ds) + if err != nil { + return nil, err + } + if ds.ImpersonationConfig != nil { + return impersonateCredentials(ctx, creds, ds) + } + return creds, nil +} + +func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { + if ds.InternalCredentials != nil { + return ds.InternalCredentials, nil + } + if ds.Credentials != nil { + return ds.Credentials, nil + } + if ds.CredentialsJSON != nil { + return credentialsFromJSON(ctx, ds.CredentialsJSON, ds) + } + if ds.CredentialsFile != "" { + data, err := ioutil.ReadFile(ds.CredentialsFile) + if err != nil { + return nil, fmt.Errorf("cannot read credentials file: %v", err) + } + return credentialsFromJSON(ctx, data, ds) + } + if ds.TokenSource != nil { + return &google.Credentials{TokenSource: ds.TokenSource}, nil + } + cred, err := google.FindDefaultCredentials(ctx, ds.GetScopes()...) + if err != nil { + return nil, err + } + if len(cred.JSON) > 0 { + return credentialsFromJSON(ctx, cred.JSON, ds) + } + // For GAE and GCE, the JSON is empty so return the default credentials directly. + return cred, nil +} + +// JSON key file type. +const ( + serviceAccountKey = "service_account" +) + +// credentialsFromJSON returns a google.Credentials from the JSON data +// +// - A self-signed JWT flow will be executed if the following conditions are +// met: +// +// (1) At least one of the following is true: +// (a) No scope is provided +// (b) Scope for self-signed JWT flow is enabled +// (c) Audiences are explicitly provided by users +// (2) No service account impersontation +// +// - Otherwise, executes standard OAuth 2.0 flow +// More details: google.aip.dev/auth/4111 +func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*google.Credentials, error) { + // By default, a standard OAuth 2.0 token source is created + cred, err := google.CredentialsFromJSON(ctx, data, ds.GetScopes()...) 
+ if err != nil { + return nil, err + } + + // Override the token source to use self-signed JWT if conditions are met + isJWTFlow, err := isSelfSignedJWTFlow(data, ds) + if err != nil { + return nil, err + } + if isJWTFlow { + ts, err := selfSignedJWTTokenSource(data, ds) + if err != nil { + return nil, err + } + cred.TokenSource = ts + } + + return cred, err +} + +func isSelfSignedJWTFlow(data []byte, ds *DialSettings) (bool, error) { + if (ds.EnableJwtWithScope || ds.HasCustomAudience()) && + ds.ImpersonationConfig == nil { + // Check if JSON is a service account and if so create a self-signed JWT. + var f struct { + Type string `json:"type"` + // The rest JSON fields are omitted because they are not used. + } + if err := json.Unmarshal(data, &f); err != nil { + return false, err + } + return f.Type == serviceAccountKey, nil + } + return false, nil +} + +func selfSignedJWTTokenSource(data []byte, ds *DialSettings) (oauth2.TokenSource, error) { + if len(ds.GetScopes()) > 0 && !ds.HasCustomAudience() { + // Scopes are preferred in self-signed JWT unless the scope is not available + // or a custom audience is used. + return google.JWTAccessTokenSourceWithScope(data, ds.GetScopes()...) + } else if ds.GetAudience() != "" { + // Fallback to audience if scope is not provided + return google.JWTAccessTokenSourceFromJSON(data, ds.GetAudience()) + } else { + return nil, errors.New("neither scopes or audience are available for the self-signed JWT") + } +} + +// QuotaProjectFromCreds returns the quota project from the JSON blob in the provided credentials. +// +// NOTE(cbro): consider promoting this to a field on google.Credentials. +func QuotaProjectFromCreds(cred *google.Credentials) string { + var v struct { + QuotaProject string `json:"quota_project_id"` + } + if err := json.Unmarshal(cred.JSON, &v); err != nil { + return "" + } + return v.QuotaProject +} + +func impersonateCredentials(ctx context.Context, creds *google.Credentials, ds *DialSettings) (*google.Credentials, error) { + if len(ds.ImpersonationConfig.Scopes) == 0 { + ds.ImpersonationConfig.Scopes = ds.GetScopes() + } + ts, err := impersonate.TokenSource(ctx, creds.TokenSource, ds.ImpersonationConfig) + if err != nil { + return nil, err + } + return &google.Credentials{ + TokenSource: ts, + ProjectID: creds.ProjectID, + }, nil +} diff --git a/vendor/google.golang.org/api/internal/impersonate/impersonate.go b/vendor/google.golang.org/api/internal/impersonate/impersonate.go new file mode 100644 index 00000000000..b465bbcd12e --- /dev/null +++ b/vendor/google.golang.org/api/internal/impersonate/impersonate.go @@ -0,0 +1,128 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package impersonate is used to impersonate Google Credentials. +package impersonate + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + "golang.org/x/oauth2" +) + +// Config for generating impersonated credentials. +type Config struct { + // Target is the service account to impersonate. Required. + Target string + // Scopes the impersonated credential should have. Required. + Scopes []string + // Delegates are the service accounts in a delegation chain. Each service + // account must be granted roles/iam.serviceAccountTokenCreator on the next + // service account in the chain. Optional. 
+ Delegates []string +} + +// TokenSource returns an impersonated TokenSource configured with the provided +// config using ts as the base credential provider for making requests. +func TokenSource(ctx context.Context, ts oauth2.TokenSource, config *Config) (oauth2.TokenSource, error) { + if len(config.Scopes) == 0 { + return nil, fmt.Errorf("impersonate: scopes must be provided") + } + its := impersonatedTokenSource{ + ctx: ctx, + ts: ts, + name: formatIAMServiceAccountName(config.Target), + // Default to the longest acceptable value of one hour as the token will + // be refreshed automatically. + lifetime: "3600s", + } + + its.delegates = make([]string, len(config.Delegates)) + for i, v := range config.Delegates { + its.delegates[i] = formatIAMServiceAccountName(v) + } + its.scopes = make([]string, len(config.Scopes)) + copy(its.scopes, config.Scopes) + + return oauth2.ReuseTokenSource(nil, its), nil +} + +func formatIAMServiceAccountName(name string) string { + return fmt.Sprintf("projects/-/serviceAccounts/%s", name) +} + +type generateAccessTokenReq struct { + Delegates []string `json:"delegates,omitempty"` + Lifetime string `json:"lifetime,omitempty"` + Scope []string `json:"scope,omitempty"` +} + +type generateAccessTokenResp struct { + AccessToken string `json:"accessToken"` + ExpireTime string `json:"expireTime"` +} + +type impersonatedTokenSource struct { + ctx context.Context + ts oauth2.TokenSource + + name string + lifetime string + scopes []string + delegates []string +} + +// Token returns an impersonated Token. +func (i impersonatedTokenSource) Token() (*oauth2.Token, error) { + hc := oauth2.NewClient(i.ctx, i.ts) + reqBody := generateAccessTokenReq{ + Delegates: i.delegates, + Lifetime: i.lifetime, + Scope: i.scopes, + } + b, err := json.Marshal(reqBody) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to marshal request: %v", err) + } + url := fmt.Sprintf("https://iamcredentials.googleapis.com/v1/%s:generateAccessToken", i.name) + req, err := http.NewRequest("POST", url, bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to create request: %v", err) + } + req = req.WithContext(i.ctx) + req.Header.Set("Content-Type", "application/json") + + resp, err := hc.Do(req) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to generate access token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to read body: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) + } + + var accessTokenResp generateAccessTokenResp + if err := json.Unmarshal(body, &accessTokenResp); err != nil { + return nil, fmt.Errorf("impersonate: unable to parse response: %v", err) + } + expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to parse expiry: %v", err) + } + return &oauth2.Token{ + AccessToken: accessTokenResp.AccessToken, + Expiry: expiry, + }, nil +} diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go new file mode 100644 index 00000000000..76efdb22772 --- /dev/null +++ b/vendor/google.golang.org/api/internal/settings.go @@ -0,0 +1,142 @@ +// Copyright 2017 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package internal supports the options and transport packages. +package internal + +import ( + "crypto/tls" + "errors" + "net/http" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/api/internal/impersonate" + "google.golang.org/grpc" +) + +// DialSettings holds information needed to establish a connection with a +// Google API service. +type DialSettings struct { + Endpoint string + DefaultEndpoint string + DefaultMTLSEndpoint string + Scopes []string + DefaultScopes []string + EnableJwtWithScope bool + TokenSource oauth2.TokenSource + Credentials *google.Credentials + CredentialsFile string // if set, Token Source is ignored. + CredentialsJSON []byte + InternalCredentials *google.Credentials + UserAgent string + APIKey string + Audiences []string + DefaultAudience string + HTTPClient *http.Client + GRPCDialOpts []grpc.DialOption + GRPCConn *grpc.ClientConn + GRPCConnPool ConnPool + GRPCConnPoolSize int + NoAuth bool + TelemetryDisabled bool + ClientCertSource func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + CustomClaims map[string]interface{} + SkipValidation bool + ImpersonationConfig *impersonate.Config + EnableDirectPath bool + AllowNonDefaultServiceAccount bool + + // Google API system parameters. For more information please read: + // https://cloud.google.com/apis/docs/system-parameters + QuotaProject string + RequestReason string +} + +// GetScopes returns the user-provided scopes, if set, or else falls back to the +// default scopes. +func (ds *DialSettings) GetScopes() []string { + if len(ds.Scopes) > 0 { + return ds.Scopes + } + return ds.DefaultScopes +} + +// GetAudience returns the user-provided audience, if set, or else falls back to the default audience. +func (ds *DialSettings) GetAudience() string { + if ds.HasCustomAudience() { + return ds.Audiences[0] + } + return ds.DefaultAudience +} + +// HasCustomAudience returns true if a custom audience is provided by users. +func (ds *DialSettings) HasCustomAudience() bool { + return len(ds.Audiences) > 0 +} + +// Validate reports an error if ds is invalid. +func (ds *DialSettings) Validate() error { + if ds.SkipValidation { + return nil + } + hasCreds := ds.APIKey != "" || ds.TokenSource != nil || ds.CredentialsFile != "" || ds.Credentials != nil + if ds.NoAuth && hasCreds { + return errors.New("options.WithoutAuthentication is incompatible with any option that provides credentials") + } + // Credentials should not appear with other options. + // We currently allow TokenSource and CredentialsFile to coexist. + // TODO(jba): make TokenSource & CredentialsFile an error (breaking change). + nCreds := 0 + if ds.Credentials != nil { + nCreds++ + } + if ds.CredentialsJSON != nil { + nCreds++ + } + if ds.CredentialsFile != "" { + nCreds++ + } + if ds.APIKey != "" { + nCreds++ + } + if ds.TokenSource != nil { + nCreds++ + } + if len(ds.Scopes) > 0 && len(ds.Audiences) > 0 { + return errors.New("WithScopes is incompatible with WithAudience") + } + // Accept only one form of credentials, except we allow TokenSource and CredentialsFile for backwards compatibility. 
+ if nCreds > 1 && !(nCreds == 2 && ds.TokenSource != nil && ds.CredentialsFile != "") { + return errors.New("multiple credential options provided") + } + if ds.GRPCConn != nil && ds.GRPCConnPool != nil { + return errors.New("WithGRPCConn is incompatible with WithConnPool") + } + if ds.HTTPClient != nil && ds.GRPCConnPool != nil { + return errors.New("WithHTTPClient is incompatible with WithConnPool") + } + if ds.HTTPClient != nil && ds.GRPCConn != nil { + return errors.New("WithHTTPClient is incompatible with WithGRPCConn") + } + if ds.HTTPClient != nil && ds.GRPCDialOpts != nil { + return errors.New("WithHTTPClient is incompatible with gRPC dial options") + } + if ds.HTTPClient != nil && ds.QuotaProject != "" { + return errors.New("WithHTTPClient is incompatible with QuotaProject") + } + if ds.HTTPClient != nil && ds.RequestReason != "" { + return errors.New("WithHTTPClient is incompatible with RequestReason") + } + if ds.HTTPClient != nil && ds.ClientCertSource != nil { + return errors.New("WithHTTPClient is incompatible with WithClientCertSource") + } + if ds.ClientCertSource != nil && (ds.GRPCConn != nil || ds.GRPCConnPool != nil || ds.GRPCConnPoolSize != 0 || ds.GRPCDialOpts != nil) { + return errors.New("WithClientCertSource is currently only supported for HTTP. gRPC settings are incompatible") + } + if ds.ImpersonationConfig != nil && len(ds.ImpersonationConfig.Scopes) == 0 && len(ds.Scopes) == 0 { + return errors.New("WithImpersonatedCredentials requires scopes being provided") + } + return nil +} diff --git a/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE b/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE new file mode 100644 index 00000000000..7109c6ef932 --- /dev/null +++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 Joshua Tacoma. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
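Validate's credential mutual-exclusion rule (from settings.go above) is compact enough to sketch standalone; the internal package cannot be imported from outside google.golang.org/api, so presence flags stand in for the real fields:

package main

import (
	"errors"
	"fmt"
)

// fakeSettings models only the fields Validate counts as credential sources.
type fakeSettings struct {
	Credentials     bool
	CredentialsJSON bool
	CredentialsFile string
	APIKey          string
	TokenSource     bool
}

func validateCreds(s fakeSettings) error {
	n := 0
	if s.Credentials {
		n++
	}
	if s.CredentialsJSON {
		n++
	}
	if s.CredentialsFile != "" {
		n++
	}
	if s.APIKey != "" {
		n++
	}
	if s.TokenSource {
		n++
	}
	// TokenSource+CredentialsFile is tolerated for backwards compatibility.
	if n > 1 && !(n == 2 && s.TokenSource && s.CredentialsFile != "") {
		return errors.New("multiple credential options provided")
	}
	return nil
}

func main() {
	fmt.Println(validateCreds(fakeSettings{APIKey: "k"}))                                   // <nil>
	fmt.Println(validateCreds(fakeSettings{APIKey: "k", CredentialsJSON: true}))            // error
	fmt.Println(validateCreds(fakeSettings{TokenSource: true, CredentialsFile: "sa.json"})) // <nil>
}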
diff --git a/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA b/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA new file mode 100644 index 00000000000..c7f86fcd5fd --- /dev/null +++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA @@ -0,0 +1,14 @@ +name: "uritemplates" +description: + "Package uritemplates is a level 4 implementation of RFC 6570 (URI " + "Template, http://tools.ietf.org/html/rfc6570)." + +third_party { + url { + type: GIT + value: "https://github.com/jtacoma/uritemplates" + } + version: "0.1" + last_upgrade_date { year: 2014 month: 8 day: 18 } + license_type: NOTICE +} diff --git a/vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go b/vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go new file mode 100644 index 00000000000..8c27d19d752 --- /dev/null +++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go @@ -0,0 +1,248 @@ +// Copyright 2013 Joshua Tacoma. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uritemplates is a level 3 implementation of RFC 6570 (URI +// Template, http://tools.ietf.org/html/rfc6570). +// uritemplates does not support composite values (in Go: slices or maps) +// and so does not qualify as a level 4 implementation. +package uritemplates + +import ( + "bytes" + "errors" + "regexp" + "strconv" + "strings" +) + +var ( + unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]") + reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]") + validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$") + hex = []byte("0123456789ABCDEF") +) + +func pctEncode(src []byte) []byte { + dst := make([]byte, len(src)*3) + for i, b := range src { + buf := dst[i*3 : i*3+3] + buf[0] = 0x25 + buf[1] = hex[b/16] + buf[2] = hex[b%16] + } + return dst +} + +// pairWriter is a convenience struct which allows escaped and unescaped +// versions of the template to be written in parallel. +type pairWriter struct { + escaped, unescaped bytes.Buffer +} + +// Write writes the provided string directly without any escaping. +func (w *pairWriter) Write(s string) { + w.escaped.WriteString(s) + w.unescaped.WriteString(s) +} + +// Escape writes the provided string, escaping the string for the +// escaped output. +func (w *pairWriter) Escape(s string, allowReserved bool) { + w.unescaped.WriteString(s) + if allowReserved { + w.escaped.Write(reserved.ReplaceAllFunc([]byte(s), pctEncode)) + } else { + w.escaped.Write(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) + } +} + +// Escaped returns the escaped string. +func (w *pairWriter) Escaped() string { + return w.escaped.String() +} + +// Unescaped returns the unescaped string. +func (w *pairWriter) Unescaped() string { + return w.unescaped.String() +} + +// A uriTemplate is a parsed representation of a URI template. +type uriTemplate struct { + raw string + parts []templatePart +} + +// parse parses a URI template string into a uriTemplate object. 
+func parse(rawTemplate string) (*uriTemplate, error) { + split := strings.Split(rawTemplate, "{") + parts := make([]templatePart, len(split)*2-1) + for i, s := range split { + if i == 0 { + if strings.Contains(s, "}") { + return nil, errors.New("unexpected }") + } + parts[i].raw = s + continue + } + subsplit := strings.Split(s, "}") + if len(subsplit) != 2 { + return nil, errors.New("malformed template") + } + expression := subsplit[0] + var err error + parts[i*2-1], err = parseExpression(expression) + if err != nil { + return nil, err + } + parts[i*2].raw = subsplit[1] + } + return &uriTemplate{ + raw: rawTemplate, + parts: parts, + }, nil +} + +type templatePart struct { + raw string + terms []templateTerm + first string + sep string + named bool + ifemp string + allowReserved bool +} + +type templateTerm struct { + name string + explode bool + truncate int +} + +func parseExpression(expression string) (result templatePart, err error) { + switch expression[0] { + case '+': + result.sep = "," + result.allowReserved = true + expression = expression[1:] + case '.': + result.first = "." + result.sep = "." + expression = expression[1:] + case '/': + result.first = "/" + result.sep = "/" + expression = expression[1:] + case ';': + result.first = ";" + result.sep = ";" + result.named = true + expression = expression[1:] + case '?': + result.first = "?" + result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '&': + result.first = "&" + result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '#': + result.first = "#" + result.sep = "," + result.allowReserved = true + expression = expression[1:] + default: + result.sep = "," + } + rawterms := strings.Split(expression, ",") + result.terms = make([]templateTerm, len(rawterms)) + for i, raw := range rawterms { + result.terms[i], err = parseTerm(raw) + if err != nil { + break + } + } + return result, err +} + +func parseTerm(term string) (result templateTerm, err error) { + // TODO(djd): Remove "*" suffix parsing once we check that no APIs have + // mistakenly used that attribute. + if strings.HasSuffix(term, "*") { + result.explode = true + term = term[:len(term)-1] + } + split := strings.Split(term, ":") + if len(split) == 1 { + result.name = term + } else if len(split) == 2 { + result.name = split[0] + var parsed int64 + parsed, err = strconv.ParseInt(split[1], 10, 0) + result.truncate = int(parsed) + } else { + err = errors.New("multiple colons in same term") + } + if !validname.MatchString(result.name) { + err = errors.New("not a valid name: " + result.name) + } + if result.explode && result.truncate > 0 { + err = errors.New("both explode and prefix modifiers on same term") + } + return result, err +} + +// Expand expands a URI template with a set of values to produce the +// resultant URI. Two forms of the result are returned: one with all the +// elements escaped, and one with the elements unescaped. 
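The expression grammar parsed above drives the package-level Expand entry point defined in utils.go further down. A behavioral sketch (this compiles only inside the google.golang.org/api module, since the package lives under internal/):

package main

import (
	"fmt"

	"google.golang.org/api/internal/third_party/uritemplates"
)

func main() {
	// Simple expansion: unreserved characters pass through; everything
	// else is percent-encoded in the escaped form only.
	escaped, unescaped, err := uritemplates.Expand(
		"/b/{bucket}/o/{object}",
		map[string]string{"bucket": "my bucket", "object": "a/b.txt"},
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(escaped)   // /b/my%20bucket/o/a%2Fb.txt
	fmt.Println(unescaped) // /b/my bucket/o/a/b.txt
}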
+func (t *uriTemplate) Expand(values map[string]string) (escaped, unescaped string) { + var w pairWriter + for _, p := range t.parts { + p.expand(&w, values) + } + return w.Escaped(), w.Unescaped() +} + +func (tp *templatePart) expand(w *pairWriter, values map[string]string) { + if len(tp.raw) > 0 { + w.Write(tp.raw) + return + } + var first = true + for _, term := range tp.terms { + value, exists := values[term.name] + if !exists { + continue + } + if first { + w.Write(tp.first) + first = false + } else { + w.Write(tp.sep) + } + tp.expandString(w, term, value) + } +} + +func (tp *templatePart) expandName(w *pairWriter, name string, empty bool) { + if tp.named { + w.Write(name) + if empty { + w.Write(tp.ifemp) + } else { + w.Write("=") + } + } +} + +func (tp *templatePart) expandString(w *pairWriter, t templateTerm, s string) { + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + tp.expandName(w, t.name, len(s) == 0) + w.Escape(s, tp.allowReserved) +} diff --git a/vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go b/vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go new file mode 100644 index 00000000000..2e70b81543d --- /dev/null +++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go @@ -0,0 +1,17 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uritemplates + +// Expand parses then expands a URI template with a set of values to produce +// the resultant URI. Two forms of the result are returned: one with all the +// elements escaped, and one with the elements unescaped. +func Expand(path string, values map[string]string) (escaped, unescaped string, err error) { + template, err := parse(path) + if err != nil { + return "", "", err + } + escaped, unescaped = template.Expand(values) + return escaped, unescaped, nil +} diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go new file mode 100644 index 00000000000..e96a3316453 --- /dev/null +++ b/vendor/google.golang.org/api/internal/version.go @@ -0,0 +1,8 @@ +// Copyright 2022 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +// Version is the current tagged release of the library. +const Version = "0.104.0" diff --git a/vendor/google.golang.org/api/iterator/iterator.go b/vendor/google.golang.org/api/iterator/iterator.go new file mode 100644 index 00000000000..1799b5d9af5 --- /dev/null +++ b/vendor/google.golang.org/api/iterator/iterator.go @@ -0,0 +1,227 @@ +// Copyright 2016 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package iterator provides support for standard Google API iterators. +// See https://github.com/GoogleCloudPlatform/gcloud-golang/wiki/Iterator-Guidelines. +package iterator + +import ( + "errors" + "fmt" + "reflect" +) + +// Done is returned by an iterator's Next method when the iteration is +// complete; when there are no more items to return. +var Done = errors.New("no more items in iterator") + +// We don't support mixed calls to Next and NextPage because they play +// with the paging state in incompatible ways. +var errMixed = errors.New("iterator: Next and NextPage called on same iterator") + +// PageInfo contains information about an iterator's paging state. 
+type PageInfo struct { + // Token is the token used to retrieve the next page of items from the + // API. You may set Token immediately after creating an iterator to + // begin iteration at a particular point. If Token is the empty string, + // the iterator will begin with the first eligible item. + // + // The result of setting Token after the first call to Next is undefined. + // + // After the underlying API method is called to retrieve a page of items, + // Token is set to the next-page token in the response. + Token string + + // MaxSize is the maximum number of items returned by a call to the API. + // Set MaxSize as a hint to optimize the buffering behavior of the iterator. + // If zero, the page size is determined by the underlying service. + // + // Use Pager to retrieve a page of a specific, exact size. + MaxSize int + + // The error state of the iterator. Manipulated by PageInfo.next and Pager. + // This is a latch: it starts as nil, and once set should never change. + err error + + // If true, no more calls to fetch should be made. Set to true when fetch + // returns an empty page token. The iterator is Done when this is true AND + // the buffer is empty. + atEnd bool + + // Function that fetches a page from the underlying service. It should pass + // the pageSize and pageToken arguments to the service, fill the buffer + // with the results from the call, and return the next-page token returned + // by the service. The function must not remove any existing items from the + // buffer. If the underlying RPC takes an int32 page size, pageSize should + // be silently truncated. + fetch func(pageSize int, pageToken string) (nextPageToken string, err error) + + // Function that returns the number of currently buffered items. + bufLen func() int + + // Function that returns the buffer, after setting the buffer variable to nil. + takeBuf func() interface{} + + // Set to true on first call to PageInfo.next or Pager.NextPage. Used to check + // for calls to both Next and NextPage with the same iterator. + nextCalled, nextPageCalled bool +} + +// NewPageInfo exposes internals for iterator implementations. +// It is not a stable interface. +var NewPageInfo = newPageInfo + +// newPageInfo creates and returns a PageInfo and a next func. If an iterator can +// support paging, its iterator-creating method should call this. Each time the +// iterator's Next is called, it should call the returned next fn to determine +// whether a next item exists, and if so it should pop an item from the buffer. +// +// The fetch, bufLen and takeBuf arguments provide access to the iterator's +// internal slice of buffered items. They behave as described in PageInfo, above. +// +// The return value is the PageInfo.next method bound to the returned PageInfo value. +// (Returning it avoids exporting PageInfo.next.) +// +// Note: the returned PageInfo and next fn do not remove items from the buffer. +// It is up to the iterator using these to remove items from the buffer: +// typically by performing a pop in its Next. If items are not removed from the +// buffer, memory may grow unbounded. +func newPageInfo(fetch func(int, string) (string, error), bufLen func() int, takeBuf func() interface{}) (pi *PageInfo, next func() error) { + pi = &PageInfo{ + fetch: fetch, + bufLen: bufLen, + takeBuf: takeBuf, + } + return pi, pi.next +} + +// Remaining returns the number of items available before the iterator makes another API call. 
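Before the paging mechanics below, a runnable sketch of how an iterator wires fetch, bufLen, and takeBuf into NewPageInfo; the letter-paging backend is invented for illustration:

package main

import (
	"fmt"

	"google.golang.org/api/iterator"
)

// letterIterator pages through a fake three-page backend.
type letterIterator struct {
	pageInfo *iterator.PageInfo
	nextFunc func() error
	items    []string
}

func newLetterIterator() *letterIterator {
	it := &letterIterator{}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
		it.fetch,
		func() int { return len(it.items) },                            // bufLen
		func() interface{} { b := it.items; it.items = nil; return b }, // takeBuf
	)
	return it
}

// fetch simulates one backend call; an empty returned token means done.
func (it *letterIterator) fetch(pageSize int, pageToken string) (string, error) {
	pages := map[string][]string{"": {"a", "b"}, "1": {"c", "d"}, "2": {"e"}}
	next := map[string]string{"": "1", "1": "2", "2": ""}
	it.items = append(it.items, pages[pageToken]...)
	return next[pageToken], nil
}

func (it *letterIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

func (it *letterIterator) Next() (string, error) {
	if err := it.nextFunc(); err != nil {
		return "", err
	}
	item := it.items[0] // nextFunc guarantees a non-empty buffer
	it.items = it.items[1:]
	return item, nil
}

func main() {
	it := newLetterIterator()
	for {
		v, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(v) // a b c d e, one per line
	}
}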
+func (pi *PageInfo) Remaining() int { return pi.bufLen() } + +// next provides support for an iterator's Next function. An iterator's Next +// should return the error returned by next if non-nil; else it can assume +// there is at least one item in its buffer, and it should return that item and +// remove it from the buffer. +func (pi *PageInfo) next() error { + pi.nextCalled = true + if pi.err != nil { // Once we get an error, always return it. + // TODO(jba): fix so users can retry on transient errors? Probably not worth it. + return pi.err + } + if pi.nextPageCalled { + pi.err = errMixed + return pi.err + } + // Loop until we get some items or reach the end. + for pi.bufLen() == 0 && !pi.atEnd { + if err := pi.fill(pi.MaxSize); err != nil { + pi.err = err + return pi.err + } + if pi.Token == "" { + pi.atEnd = true + } + } + // Either the buffer is non-empty or pi.atEnd is true (or both). + if pi.bufLen() == 0 { + // The buffer is empty and pi.atEnd is true, i.e. the service has no + // more items. + pi.err = Done + } + return pi.err +} + +// Call the service to fill the buffer, using size and pi.Token. Set pi.Token to the +// next-page token returned by the call. +// If fill returns a non-nil error, the buffer will be empty. +func (pi *PageInfo) fill(size int) error { + tok, err := pi.fetch(size, pi.Token) + if err != nil { + pi.takeBuf() // clear the buffer + return err + } + pi.Token = tok + return nil +} + +// Pageable is implemented by iterators that support paging. +type Pageable interface { + // PageInfo returns paging information associated with the iterator. + PageInfo() *PageInfo +} + +// Pager supports retrieving iterator items a page at a time. +type Pager struct { + pageInfo *PageInfo + pageSize int +} + +// NewPager returns a pager that uses iter. Calls to its NextPage method will +// obtain exactly pageSize items, unless fewer remain. The pageToken argument +// indicates where to start the iteration. Pass the empty string to start at +// the beginning, or pass a token retrieved from a call to Pager.NextPage. +// +// If you use an iterator with a Pager, you must not call Next on the iterator. +func NewPager(iter Pageable, pageSize int, pageToken string) *Pager { + p := &Pager{ + pageInfo: iter.PageInfo(), + pageSize: pageSize, + } + p.pageInfo.Token = pageToken + if pageSize <= 0 { + p.pageInfo.err = errors.New("iterator: page size must be positive") + } + return p +} + +// NextPage retrieves a sequence of items from the iterator and appends them +// to slicep, which must be a pointer to a slice of the iterator's item type. +// Exactly p.pageSize items will be appended, unless fewer remain. +// +// The first return value is the page token to use for the next page of items. +// If empty, there are no more pages. Aside from checking for the end of the +// iteration, the returned page token is only needed if the iteration is to be +// resumed at a later time, in another context (possibly another process). +// +// The second return value is non-nil if an error occurred. It will never be +// the special iterator sentinel value Done. To recognize the end of the +// iteration, compare nextPageToken to the empty string. +// +// It is possible for NextPage to return a single zero-length page along with +// an empty page token when there are no more items in the iteration.
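The page-token contract just described, as a usage sketch; the element type here (string) is illustrative and must match the concrete iterator's item type:

package example

import (
	"fmt"

	"google.golang.org/api/iterator"
)

// pageThrough walks any Pageable iterator 50 items at a time. Note that
// NextPage never returns iterator.Done; an empty token marks the end.
func pageThrough(it iterator.Pageable) error {
	p := iterator.NewPager(it, 50, "") // empty start token: begin at the first page
	for {
		var page []string // &page is the required pointer-to-slice of the item type
		tok, err := p.NextPage(&page)
		if err != nil {
			return err
		}
		fmt.Printf("got %d items\n", len(page))
		if tok == "" {
			return nil
		}
	}
}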
+func (p *Pager) NextPage(slicep interface{}) (nextPageToken string, err error) { + p.pageInfo.nextPageCalled = true + if p.pageInfo.err != nil { + return "", p.pageInfo.err + } + if p.pageInfo.nextCalled { + p.pageInfo.err = errMixed + return "", p.pageInfo.err + } + if p.pageInfo.bufLen() > 0 { + return "", errors.New("must call NextPage with an empty buffer") + } + // The buffer must be empty here, so takeBuf is a no-op. We call it just to get + // the buffer's type. + wantSliceType := reflect.PtrTo(reflect.ValueOf(p.pageInfo.takeBuf()).Type()) + if slicep == nil { + return "", errors.New("nil passed to Pager.NextPage") + } + vslicep := reflect.ValueOf(slicep) + if vslicep.Type() != wantSliceType { + return "", fmt.Errorf("slicep should be of type %s, got %T", wantSliceType, slicep) + } + for p.pageInfo.bufLen() < p.pageSize { + if err := p.pageInfo.fill(p.pageSize - p.pageInfo.bufLen()); err != nil { + p.pageInfo.err = err + return "", p.pageInfo.err + } + if p.pageInfo.Token == "" { + break + } + } + e := vslicep.Elem() + e.Set(reflect.AppendSlice(e, reflect.ValueOf(p.pageInfo.takeBuf()))) + return p.pageInfo.Token, nil +} diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go new file mode 100644 index 00000000000..343a5a965eb --- /dev/null +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -0,0 +1,136 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internaloption contains options used internally by Google client code. +package internaloption + +import ( + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" + "google.golang.org/api/option" +) + +type defaultEndpointOption string + +func (o defaultEndpointOption) Apply(settings *internal.DialSettings) { + settings.DefaultEndpoint = string(o) +} + +// WithDefaultEndpoint is an option that indicates the default endpoint. +// +// It should only be used internally by generated clients. +// +// This is similar to WithEndpoint, but allows us to determine whether the user has overridden the default endpoint. +func WithDefaultEndpoint(url string) option.ClientOption { + return defaultEndpointOption(url) +} + +type defaultMTLSEndpointOption string + +func (o defaultMTLSEndpointOption) Apply(settings *internal.DialSettings) { + settings.DefaultMTLSEndpoint = string(o) +} + +// WithDefaultMTLSEndpoint is an option that indicates the default mTLS endpoint. +// +// It should only be used internally by generated clients. +func WithDefaultMTLSEndpoint(url string) option.ClientOption { + return defaultMTLSEndpointOption(url) +} + +// SkipDialSettingsValidation bypasses validation on ClientOptions. +// +// It should only be used internally. +func SkipDialSettingsValidation() option.ClientOption { + return skipDialSettingsValidation{} +} + +type skipDialSettingsValidation struct{} + +func (s skipDialSettingsValidation) Apply(settings *internal.DialSettings) { + settings.SkipValidation = true +} + +// EnableDirectPath returns a ClientOption that overrides the default +// attempt to use DirectPath. +// +// It should only be used internally by generated clients. +// This is an EXPERIMENTAL API and may be changed or removed in the future. 
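These internal options are what generated clients thread through before user-supplied options. A sketch of the shape, with endpoint and scope values that are illustrative, patterned on the vendored KMS client's defaults:

package example

import (
	"google.golang.org/api/option"
	"google.golang.org/api/option/internaloption"
)

// defaultKMSOptions mirrors how a generated client assembles its defaults;
// user-supplied options are appended after these.
func defaultKMSOptions() []option.ClientOption {
	return []option.ClientOption{
		internaloption.WithDefaultEndpoint("cloudkms.googleapis.com:443"),
		internaloption.WithDefaultMTLSEndpoint("cloudkms.mtls.googleapis.com:443"),
		internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"),
		internaloption.WithDefaultScopes("https://www.googleapis.com/auth/cloud-platform"),
	}
}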
+func EnableDirectPath(dp bool) option.ClientOption { + return enableDirectPath(dp) +} + +type enableDirectPath bool + +func (e enableDirectPath) Apply(o *internal.DialSettings) { + o.EnableDirectPath = bool(e) +} + +// AllowNonDefaultServiceAccount returns a ClientOption that overrides the default +// requirement for using the default service account for DirectPath. +// +// It should only be used internally by generated clients. +// This is an EXPERIMENTAL API and may be changed or removed in the future. +func AllowNonDefaultServiceAccount(nd bool) option.ClientOption { + return allowNonDefaultServiceAccount(nd) +} + +type allowNonDefaultServiceAccount bool + +func (a allowNonDefaultServiceAccount) Apply(o *internal.DialSettings) { + o.AllowNonDefaultServiceAccount = bool(a) +} + +// WithDefaultAudience returns a ClientOption that specifies a default audience +// to be used as the audience field ("aud") for the JWT token authentication. +// +// It should only be used internally by generated clients. +func WithDefaultAudience(audience string) option.ClientOption { + return withDefaultAudience(audience) +} + +type withDefaultAudience string + +func (w withDefaultAudience) Apply(o *internal.DialSettings) { + o.DefaultAudience = string(w) +} + +// WithDefaultScopes returns a ClientOption that overrides the default OAuth2 +// scopes to be used for a service. +// +// It should only be used internally by generated clients. +func WithDefaultScopes(scope ...string) option.ClientOption { + return withDefaultScopes(scope) +} + +type withDefaultScopes []string + +func (w withDefaultScopes) Apply(o *internal.DialSettings) { + o.DefaultScopes = make([]string, len(w)) + copy(o.DefaultScopes, w) +} + +// EnableJwtWithScope returns a ClientOption that specifies if scope can be used +// with self-signed JWT. +func EnableJwtWithScope() option.ClientOption { + return enableJwtWithScope(true) +} + +type enableJwtWithScope bool + +func (w enableJwtWithScope) Apply(o *internal.DialSettings) { + o.EnableJwtWithScope = bool(w) +} + +// WithCredentials returns a client option to specify credentials which will be used to authenticate API calls. +// This credential takes precedence over all other credential options. +func WithCredentials(creds *google.Credentials) option.ClientOption { + return (*withCreds)(creds) +} + +type withCreds google.Credentials + +func (w *withCreds) Apply(o *internal.DialSettings) { + o.InternalCredentials = (*google.Credentials)(w) +} diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go new file mode 100644 index 00000000000..b2085a1949a --- /dev/null +++ b/vendor/google.golang.org/api/option/option.go @@ -0,0 +1,345 @@ +// Copyright 2017 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package option contains options for Google API clients. +package option + +import ( + "crypto/tls" + "net/http" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" + "google.golang.org/api/internal/impersonate" + "google.golang.org/grpc" +) + +// A ClientOption is an option for a Google API client. +type ClientOption interface { + Apply(*internal.DialSettings) +} + +// WithTokenSource returns a ClientOption that specifies an OAuth2 token +// source to be used as the basis for authentication. 
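For example, a caller that already holds an OAuth2 access token can hand it to a client this way; the client here is the vendored KMS client, and the token value is a placeholder:

package example

import (
	"context"

	kms "cloud.google.com/go/kms/apiv1"
	"golang.org/x/oauth2"
	"google.golang.org/api/option"
)

// newKMSClientWithToken builds a client from a static access token. Static
// tokens expire and are not refreshed; prefer credentials files or ADC for
// long-lived processes.
func newKMSClientWithToken(ctx context.Context, accessToken string) (*kms.KeyManagementClient, error) {
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: accessToken})
	return kms.NewKeyManagementClient(ctx, option.WithTokenSource(ts))
}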
+func WithTokenSource(s oauth2.TokenSource) ClientOption { + return withTokenSource{s} +} + +type withTokenSource struct{ ts oauth2.TokenSource } + +func (w withTokenSource) Apply(o *internal.DialSettings) { + o.TokenSource = w.ts +} + +type withCredFile string + +func (w withCredFile) Apply(o *internal.DialSettings) { + o.CredentialsFile = string(w) +} + +// WithCredentialsFile returns a ClientOption that authenticates +// API calls with the given service account or refresh token JSON +// credentials file. +func WithCredentialsFile(filename string) ClientOption { + return withCredFile(filename) +} + +// WithServiceAccountFile returns a ClientOption that uses a Google service +// account credentials file to authenticate. +// +// Deprecated: Use WithCredentialsFile instead. +func WithServiceAccountFile(filename string) ClientOption { + return WithCredentialsFile(filename) +} + +// WithCredentialsJSON returns a ClientOption that authenticates +// API calls with the given service account or refresh token JSON +// credentials. +func WithCredentialsJSON(p []byte) ClientOption { + return withCredentialsJSON(p) +} + +type withCredentialsJSON []byte + +func (w withCredentialsJSON) Apply(o *internal.DialSettings) { + o.CredentialsJSON = make([]byte, len(w)) + copy(o.CredentialsJSON, w) +} + +// WithEndpoint returns a ClientOption that overrides the default endpoint +// to be used for a service. +func WithEndpoint(url string) ClientOption { + return withEndpoint(url) +} + +type withEndpoint string + +func (w withEndpoint) Apply(o *internal.DialSettings) { + o.Endpoint = string(w) +} + +// WithScopes returns a ClientOption that overrides the default OAuth2 scopes +// to be used for a service. +// +// If both WithScopes and WithTokenSource are used, scope settings from the +// token source will be used instead. +func WithScopes(scope ...string) ClientOption { + return withScopes(scope) +} + +type withScopes []string + +func (w withScopes) Apply(o *internal.DialSettings) { + o.Scopes = make([]string, len(w)) + copy(o.Scopes, w) +} + +// WithUserAgent returns a ClientOption that sets the User-Agent. This option +// is incompatible with the [WithHTTPClient] option. If you wish to provide a +// custom client you will need to add this header via RoundTripper middleware. +func WithUserAgent(ua string) ClientOption { + return withUA(ua) +} + +type withUA string + +func (w withUA) Apply(o *internal.DialSettings) { o.UserAgent = string(w) } + +// WithHTTPClient returns a ClientOption that specifies the HTTP client to use +// as the basis of communications. This option may only be used with services +// that support HTTP as their communication transport. When used, the +// WithHTTPClient option takes precedence over all other supplied options. +func WithHTTPClient(client *http.Client) ClientOption { + return withHTTPClient{client} +} + +type withHTTPClient struct{ client *http.Client } + +func (w withHTTPClient) Apply(o *internal.DialSettings) { + o.HTTPClient = w.client +} + +// WithGRPCConn returns a ClientOption that specifies the gRPC client +// connection to use as the basis of communications. This option may only be +// used with services that support gRPC as their communication transport. When +// used, the WithGRPCConn option takes precedence over all other supplied +// options.
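A sketch of the WithGRPCConn path just documented: the caller owns the connection, including its credentials and lifetime, and the client simply rides on it:

package example

import (
	"context"

	kms "cloud.google.com/go/kms/apiv1"
	"google.golang.org/api/option"
	"google.golang.org/grpc"
)

// newKMSClientOnConn reuses a caller-managed connection. Because the supplied
// conn takes precedence over other options, auth must already be configured
// on it, and the caller is responsible for closing it.
func newKMSClientOnConn(ctx context.Context, conn *grpc.ClientConn) (*kms.KeyManagementClient, error) {
	return kms.NewKeyManagementClient(ctx, option.WithGRPCConn(conn))
}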
+func WithGRPCConn(conn *grpc.ClientConn) ClientOption { + return withGRPCConn{conn} +} + +type withGRPCConn struct{ conn *grpc.ClientConn } + +func (w withGRPCConn) Apply(o *internal.DialSettings) { + o.GRPCConn = w.conn +} + +// WithGRPCDialOption returns a ClientOption that appends a new grpc.DialOption +// to an underlying gRPC dial. It does not work with WithGRPCConn. +func WithGRPCDialOption(opt grpc.DialOption) ClientOption { + return withGRPCDialOption{opt} +} + +type withGRPCDialOption struct{ opt grpc.DialOption } + +func (w withGRPCDialOption) Apply(o *internal.DialSettings) { + o.GRPCDialOpts = append(o.GRPCDialOpts, w.opt) +} + +// WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC +// connections that requests will be balanced between. +func WithGRPCConnectionPool(size int) ClientOption { + return withGRPCConnectionPool(size) +} + +type withGRPCConnectionPool int + +func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) { + o.GRPCConnPoolSize = int(w) +} + +// WithAPIKey returns a ClientOption that specifies an API key to be used +// as the basis for authentication. +// +// API Keys can only be used for JSON-over-HTTP APIs, including those under +// the import path google.golang.org/api/.... +func WithAPIKey(apiKey string) ClientOption { + return withAPIKey(apiKey) +} + +type withAPIKey string + +func (w withAPIKey) Apply(o *internal.DialSettings) { o.APIKey = string(w) } + +// WithAudiences returns a ClientOption that specifies an audience to be used +// as the audience field ("aud") for the JWT token authentication. +func WithAudiences(audience ...string) ClientOption { + return withAudiences(audience) +} + +type withAudiences []string + +func (w withAudiences) Apply(o *internal.DialSettings) { + o.Audiences = make([]string, len(w)) + copy(o.Audiences, w) +} + +// WithoutAuthentication returns a ClientOption that specifies that no +// authentication should be used. It is suitable only for testing and for +// accessing public resources, like public Google Cloud Storage buckets. +// It is an error to provide both WithoutAuthentication and any of WithAPIKey, +// WithTokenSource, WithCredentialsFile or WithServiceAccountFile. +func WithoutAuthentication() ClientOption { + return withoutAuthentication{} +} + +type withoutAuthentication struct{} + +func (w withoutAuthentication) Apply(o *internal.DialSettings) { o.NoAuth = true } + +// WithQuotaProject returns a ClientOption that specifies the project used +// for quota and billing purposes. +// +// For more information please read: +// https://cloud.google.com/apis/docs/system-parameters +func WithQuotaProject(quotaProject string) ClientOption { + return withQuotaProject(quotaProject) +} + +type withQuotaProject string + +func (w withQuotaProject) Apply(o *internal.DialSettings) { + o.QuotaProject = string(w) +} + +// WithRequestReason returns a ClientOption that specifies a reason for +// making the request, which is intended to be recorded in audit logging. +// An example reason would be a support-case ticket number. +// +// For more information please read: +// https://cloud.google.com/apis/docs/system-parameters +func WithRequestReason(requestReason string) ClientOption { + return withRequestReason(requestReason) +} + +type withRequestReason string + +func (w withRequestReason) Apply(o *internal.DialSettings) { + o.RequestReason = string(w) +} + +// WithTelemetryDisabled returns a ClientOption that disables default telemetry (OpenCensus) +// settings on gRPC and HTTP clients. 
+// An example reason would be to bind custom telemetry that overrides the defaults. +func WithTelemetryDisabled() ClientOption { + return withTelemetryDisabled{} +} + +type withTelemetryDisabled struct{} + +func (w withTelemetryDisabled) Apply(o *internal.DialSettings) { + o.TelemetryDisabled = true +} + +// ClientCertSource is a function that returns a TLS client certificate to be used +// when opening TLS connections. +// +// It follows the same semantics as crypto/tls.Config.GetClientCertificate. +// +// This is an EXPERIMENTAL API and may be changed or removed in the future. +type ClientCertSource = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// WithClientCertSource returns a ClientOption that specifies a +// callback function for obtaining a TLS client certificate. +// +// This option is used for supporting mTLS authentication, where the +// server validates the client certificate when establishing a connection. +// +// The callback function will be invoked whenever the server requests a +// certificate from the client. Implementations of the callback function +// should try to ensure that a valid certificate can be repeatedly returned +// on demand for the entire life cycle of the transport client. If a nil +// Certificate is returned (i.e. no Certificate can be obtained), an error +// should be returned. +// +// This is an EXPERIMENTAL API and may be changed or removed in the future. +func WithClientCertSource(s ClientCertSource) ClientOption { + return withClientCertSource{s} +} + +type withClientCertSource struct{ s ClientCertSource } + +func (w withClientCertSource) Apply(o *internal.DialSettings) { + o.ClientCertSource = w.s +} + +// ImpersonateCredentials returns a ClientOption that will impersonate the +// target service account. +// +// In order to impersonate the target service account +// the base service account must have the Service Account Token Creator role, +// roles/iam.serviceAccountTokenCreator, on the target service account. +// See https://cloud.google.com/iam/docs/understanding-service-accounts. +// +// Optionally, delegates can be used during impersonation if the base service +// account lacks the token creator role on the target. When using delegates, +// each service account must be granted roles/iam.serviceAccountTokenCreator +// on the next service account in the chain. +// +// For example, if a base service account of SA1 is trying to impersonate target +// service account SA2 while using delegate service accounts DSA1 and DSA2, +// the following must be true: +// +// 1. Base service account SA1 has roles/iam.serviceAccountTokenCreator on +// DSA1. +// 2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2. +// 3. DSA2 has roles/iam.serviceAccountTokenCreator on target SA2. +// +// The resulting impersonated credential will either have the default scopes of +// the client being instantiated or the scopes from WithScopes if provided. +// Scopes are required for creating impersonated credentials, so if this option +// is used while not using a NewClient/NewService function, WithScopes must also +// be explicitly passed in as well. +// +// If the base credential is an authorized user and not a service account, or if +// the option WithQuotaProject is set, the target service account must have a +// role that grants the serviceusage.services.use permission such as +// roles/serviceusage.serviceUsageConsumer. +// +// This is an EXPERIMENTAL API and may be changed or removed in the future.
+// +// Deprecated: This option has been replaced by `impersonate` package: +// `google.golang.org/api/impersonate`. Please use the `impersonate` package +// instead with the WithTokenSource option. +func ImpersonateCredentials(target string, delegates ...string) ClientOption { + return impersonateServiceAccount{ + target: target, + delegates: delegates, + } +} + +type impersonateServiceAccount struct { + target string + delegates []string +} + +func (i impersonateServiceAccount) Apply(o *internal.DialSettings) { + o.ImpersonationConfig = &impersonate.Config{ + Target: i.target, + } + o.ImpersonationConfig.Delegates = make([]string, len(i.delegates)) + copy(o.ImpersonationConfig.Delegates, i.delegates) +} + +type withCreds google.Credentials + +func (w *withCreds) Apply(o *internal.DialSettings) { + o.Credentials = (*google.Credentials)(w) +} + +// WithCredentials returns a ClientOption that authenticates API calls. +func WithCredentials(creds *google.Credentials) ClientOption { + return (*withCreds)(creds) +} diff --git a/vendor/google.golang.org/api/transport/cert/default_cert.go b/vendor/google.golang.org/api/transport/cert/default_cert.go new file mode 100644 index 00000000000..21d0251531c --- /dev/null +++ b/vendor/google.golang.org/api/transport/cert/default_cert.go @@ -0,0 +1,58 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cert contains certificate tools for Google API clients. +// This package is intended to be used with crypto/tls.Config.GetClientCertificate. +// +// The certificates can be used to satisfy Google's Endpoint Validation. +// See https://cloud.google.com/endpoint-verification/docs/overview +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package cert + +import ( + "crypto/tls" + "errors" + "sync" +) + +// defaultCertData holds all the variables pertaining to +// the default certificate source created by DefaultSource. +// +// A singleton model is used to allow the source to be reused +// by the transport layer. +type defaultCertData struct { + once sync.Once + source Source + err error +} + +var ( + defaultCert defaultCertData +) + +// Source is a function that can be passed into crypto/tls.Config.GetClientCertificate. +type Source func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// errSourceUnavailable is a sentinel error to indicate certificate source is unavailable. +var errSourceUnavailable = errors.New("certificate source is unavailable") + +// DefaultSource returns a certificate source using the preferred EnterpriseCertificateProxySource. +// If EnterpriseCertificateProxySource is not available, fall back to the legacy SecureConnectSource. +// +// If neither source is available (due to missing configurations), a nil Source and a nil error are +// returned to indicate that a default certificate source is unavailable.
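A usage sketch for DefaultSource as described above; a nil source with a nil error simply means no device certificate is configured, so the config falls back to plain TLS:

package example

import (
	"crypto/tls"

	"google.golang.org/api/transport/cert"
)

// clientTLSConfig attaches a device certificate source when one is available.
func clientTLSConfig() (*tls.Config, error) {
	src, err := cert.DefaultSource()
	if err != nil {
		return nil, err
	}
	cfg := &tls.Config{MinVersion: tls.VersionTLS12}
	if src != nil {
		cfg.GetClientCertificate = src // cert.Source matches this callback's signature
	}
	return cfg, nil
}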
+func DefaultSource() (Source, error) { + defaultCert.once.Do(func() { + defaultCert.source, defaultCert.err = NewEnterpriseCertificateProxySource("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.source, defaultCert.err = NewSecureConnectSource("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.source, defaultCert.err = nil, nil + } + } + }) + return defaultCert.source, defaultCert.err +} diff --git a/vendor/google.golang.org/api/transport/cert/enterprise_cert.go b/vendor/google.golang.org/api/transport/cert/enterprise_cert.go new file mode 100644 index 00000000000..eaa52e07c08 --- /dev/null +++ b/vendor/google.golang.org/api/transport/cert/enterprise_cert.go @@ -0,0 +1,56 @@ +// Copyright 2022 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cert contains certificate tools for Google API clients. +// This package is intended to be used with crypto/tls.Config.GetClientCertificate. +// +// The certificates can be used to satisfy Google's Endpoint Validation. +// See https://cloud.google.com/endpoint-verification/docs/overview +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package cert + +import ( + "crypto/tls" + "errors" + "os" + + "github.com/googleapis/enterprise-certificate-proxy/client" +) + +type ecpSource struct { + key *client.Key +} + +// NewEnterpriseCertificateProxySource creates a certificate source +// using the Enterprise Certificate Proxy client, which delegates +// certificate-related operations to an OS-specific "signer binary" +// that communicates with the native keystore (ex. keychain on macOS). +// +// The configFilePath points to a config file containing relevant parameters +// such as the certificate issuer and the location of the signer binary. +// If configFilePath is empty, the client will attempt to load the config from +// a well-known gcloud location. +func NewEnterpriseCertificateProxySource(configFilePath string) (Source, error) { + key, err := client.Cred(configFilePath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + // Config file missing means Enterprise Certificate Proxy is not supported. + return nil, errSourceUnavailable + } + return nil, err + } + + return (&ecpSource{ + key: key, + }).getClientCertificate, nil +} + +func (s *ecpSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + var cert tls.Certificate + cert.PrivateKey = s.key + cert.Certificate = s.key.CertificateChain() + return &cert, nil +} diff --git a/vendor/google.golang.org/api/transport/cert/secureconnect_cert.go b/vendor/google.golang.org/api/transport/cert/secureconnect_cert.go new file mode 100644 index 00000000000..5913cab8017 --- /dev/null +++ b/vendor/google.golang.org/api/transport/cert/secureconnect_cert.go @@ -0,0 +1,123 @@ +// Copyright 2022 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cert contains certificate tools for Google API clients. +// This package is intended to be used with crypto/tls.Config.GetClientCertificate. +// +// The certificates can be used to satisfy Google's Endpoint Validation. +// See https://cloud.google.com/endpoint-verification/docs/overview +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients.
+package cert + +import ( + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "os/exec" + "os/user" + "path/filepath" + "sync" + "time" +) + +const ( + metadataPath = ".secureConnect" + metadataFile = "context_aware_metadata.json" +) + +type secureConnectSource struct { + metadata secureConnectMetadata + + // Cache the cert to avoid executing helper command repeatedly. + cachedCertMutex sync.Mutex + cachedCert *tls.Certificate +} + +type secureConnectMetadata struct { + Cmd []string `json:"cert_provider_command"` +} + +// NewSecureConnectSource creates a certificate source using +// the Secure Connect Helper and its associated metadata file. +// +// The configFilePath points to the location of the context aware metadata file. +// If configFilePath is empty, use the default context aware metadata location. +func NewSecureConnectSource(configFilePath string) (Source, error) { + if configFilePath == "" { + user, err := user.Current() + if err != nil { + // Error locating the default config means Secure Connect is not supported. + return nil, errSourceUnavailable + } + configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile) + } + + file, err := ioutil.ReadFile(configFilePath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + // Config file missing means Secure Connect is not supported. + return nil, errSourceUnavailable + } + return nil, err + } + + var metadata secureConnectMetadata + if err := json.Unmarshal(file, &metadata); err != nil { + return nil, fmt.Errorf("cert: could not parse JSON in %q: %w", configFilePath, err) + } + if err := validateMetadata(metadata); err != nil { + return nil, fmt.Errorf("cert: invalid config in %q: %w", configFilePath, err) + } + return (&secureConnectSource{ + metadata: metadata, + }).getClientCertificate, nil +} + +func validateMetadata(metadata secureConnectMetadata) error { + if len(metadata.Cmd) == 0 { + return errors.New("empty cert_provider_command") + } + return nil +} + +func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + s.cachedCertMutex.Lock() + defer s.cachedCertMutex.Unlock() + if s.cachedCert != nil && !isCertificateExpired(s.cachedCert) { + return s.cachedCert, nil + } + // Expand OS environment variables in the cert provider command such as "$HOME". + for i := 0; i < len(s.metadata.Cmd); i++ { + s.metadata.Cmd[i] = os.ExpandEnv(s.metadata.Cmd[i]) + } + command := s.metadata.Cmd + data, err := exec.Command(command[0], command[1:]...).Output() + if err != nil { + return nil, err + } + cert, err := tls.X509KeyPair(data, data) + if err != nil { + return nil, err + } + s.cachedCert = &cert + return &cert, nil +} + +// isCertificateExpired returns true if the given cert is expired or invalid. +func isCertificateExpired(cert *tls.Certificate) bool { + if len(cert.Certificate) == 0 { + return true + } + parsed, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return true + } + return time.Now().After(parsed.NotAfter) +} diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go new file mode 100644 index 00000000000..efcc8e6c641 --- /dev/null +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -0,0 +1,333 @@ +// Copyright 2015 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package grpc supports network connections to GRPC servers. 
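For orientation, the metadata file that NewSecureConnectSource reads has this shape; the helper path and flag here are hypothetical:

{
  "cert_provider_command": ["/usr/local/bin/cert-helper", "--print_certificate"]
}

The command must write a PEM-encoded certificate and key to stdout; the source feeds that output to tls.X509KeyPair and caches the parsed certificate until it expires.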
+// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package grpc + +import ( + "context" + "crypto/tls" + "errors" + "log" + "net" + "os" + "strings" + + "cloud.google.com/go/compute/metadata" + "go.opencensus.io/plugin/ocgrpc" + "golang.org/x/oauth2" + "google.golang.org/api/internal" + "google.golang.org/api/option" + "google.golang.org/api/transport/internal/dca" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + grpcgoogle "google.golang.org/grpc/credentials/google" + grpcinsecure "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/credentials/oauth" + + // Install grpclb, which is required for direct path. + _ "google.golang.org/grpc/balancer/grpclb" +) + +// Check env to disable DirectPath traffic. +const disableDirectPath = "GOOGLE_CLOUD_DISABLE_DIRECT_PATH" + +// Check env to decide if using google-c2p resolver for DirectPath traffic. +const enableDirectPathXds = "GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS" + +// Set at init time by dial_appengine.go. If nil, we're not on App Engine. +var appengineDialerHook func(context.Context) grpc.DialOption + +// Set at init time by dial_socketopt.go. If nil, socketopt is not supported. +var timeoutDialerOption grpc.DialOption + +// Dial returns a GRPC connection for use communicating with a Google cloud +// service, configured with the given ClientOptions. +func Dial(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + o, err := processAndValidateOpts(opts) + if err != nil { + return nil, err + } + if o.GRPCConnPool != nil { + return o.GRPCConnPool.Conn(), nil + } + // NOTE(cbro): We removed support for option.WithGRPCConnPool (GRPCConnPoolSize) + // on 2020-02-12 because RoundRobin and WithBalancer are deprecated and we need to remove usages of it. + // + // Connection pooling is only done via DialPool. + return dial(ctx, false, o) +} + +// DialInsecure returns an insecure GRPC connection for use communicating +// with fake or mock Google cloud service implementations, such as emulators. +// The connection is configured with the given ClientOptions. +func DialInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + o, err := processAndValidateOpts(opts) + if err != nil { + return nil, err + } + return dial(ctx, true, o) +} + +// DialPool returns a pool of GRPC connections for the given service. +// This differs from the connection pooling implementation used by Dial, which uses a custom GRPC load balancer. +// DialPool should be used instead of Dial when a pool is used by default or a different custom GRPC load balancer is needed. +// The context and options are shared between each Conn in the pool. +// The pool size is configured using the WithGRPCConnectionPool option. +// +// This API is subject to change as we further refine requirements. It will go away if gRPC stubs accept an interface instead of the concrete ClientConn type. See https://github.com/grpc/grpc-go/issues/1287. +func DialPool(ctx context.Context, opts ...option.ClientOption) (ConnPool, error) { + o, err := processAndValidateOpts(opts) + if err != nil { + return nil, err + } + if o.GRPCConnPool != nil { + return o.GRPCConnPool, nil + } + poolSize := o.GRPCConnPoolSize + if o.GRPCConn != nil { + // WithGRPCConn is technically incompatible with WithGRPCConnectionPool. + // Always assume pool size is 1 when a grpc.ClientConn is explicitly used. 
+ poolSize = 1 + } + o.GRPCConnPoolSize = 0 // we don't *need* to set this to zero, but it's safe to. + + if poolSize == 0 || poolSize == 1 { + // Fast path for the common case of a connection pool with a single connection. + conn, err := dial(ctx, false, o) + if err != nil { + return nil, err + } + return &singleConnPool{conn}, nil + } + + pool := &roundRobinConnPool{} + for i := 0; i < poolSize; i++ { + conn, err := dial(ctx, false, o) + if err != nil { + defer pool.Close() // NOTE: error from Close is ignored. + return nil, err + } + pool.conns = append(pool.conns, conn) + } + return pool, nil +} + +func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.ClientConn, error) { + if o.HTTPClient != nil { + return nil, errors.New("unsupported HTTP client specified") + } + if o.GRPCConn != nil { + return o.GRPCConn, nil + } + clientCertSource, endpoint, err := dca.GetClientCertificateSourceAndEndpoint(o) + if err != nil { + return nil, err + } + + var transportCreds credentials.TransportCredentials + if insecure { + transportCreds = grpcinsecure.NewCredentials() + } else { + transportCreds = credentials.NewTLS(&tls.Config{ + GetClientCertificate: clientCertSource, + }) + } + + // Initialize gRPC dial options with transport-level security options. + grpcOpts := []grpc.DialOption{ + grpc.WithTransportCredentials(transportCreds), + } + + // Authentication can only be sent when communicating over a secure connection. + // + // TODO: Should we be more lenient in the future and allow sending credentials + // when dialing an insecure connection? + if !o.NoAuth && !insecure { + if o.APIKey != "" { + log.Print("API keys are not supported for gRPC APIs. Remove the WithAPIKey option from your client-creating call.") + } + creds, err := internal.Creds(ctx, o) + if err != nil { + return nil, err + } + + if o.QuotaProject == "" { + o.QuotaProject = internal.QuotaProjectFromCreds(creds) + } + + grpcOpts = append(grpcOpts, + grpc.WithPerRPCCredentials(grpcTokenSource{ + TokenSource: oauth.TokenSource{creds.TokenSource}, + quotaProject: o.QuotaProject, + requestReason: o.RequestReason, + }), + ) + + // Attempt Direct Path: + if isDirectPathEnabled(endpoint, o) && isTokenSourceDirectPathCompatible(creds.TokenSource, o) && metadata.OnGCE() { + // Overwrite all of the previously specified DialOptions; DirectPath uses its own set of credentials and certificates. + grpcOpts = []grpc.DialOption{ + grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{oauth.TokenSource{creds.TokenSource}}))} + if timeoutDialerOption != nil { + grpcOpts = append(grpcOpts, timeoutDialerOption) + } + // Check if google-c2p resolver is enabled for DirectPath + if strings.EqualFold(os.Getenv(enableDirectPathXds), "true") { + // google-c2p resolver target must not have a port number + if addr, _, err := net.SplitHostPort(endpoint); err == nil { + endpoint = "google-c2p:///" + addr + } else { + endpoint = "google-c2p:///" + endpoint + } + } else { + if !strings.HasPrefix(endpoint, "dns:///") { + endpoint = "dns:///" + endpoint + } + grpcOpts = append(grpcOpts, + // For now all DirectPath go clients will be using the following lb config, but in future + // when different services need different configs, then we should change this to a + // per-service config.
+ grpc.WithDisableServiceConfig(), + grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`)) + } + // TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor. + } + } + + if appengineDialerHook != nil { + // Use the Socket API on App Engine. + // appengine dialer will override socketopt dialer + grpcOpts = append(grpcOpts, appengineDialerHook(ctx)) + } + + // Add tracing, but before the other options, so that clients can override the + // gRPC stats handler. + // This assumes that gRPC options are processed in order, left to right. + grpcOpts = addOCStatsHandler(grpcOpts, o) + grpcOpts = append(grpcOpts, o.GRPCDialOpts...) + if o.UserAgent != "" { + grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent)) + } + + return grpc.DialContext(ctx, endpoint, grpcOpts...) +} + +func addOCStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption { + if settings.TelemetryDisabled { + return opts + } + return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) +} + +// grpcTokenSource supplies PerRPCCredentials from an oauth.TokenSource. +type grpcTokenSource struct { + oauth.TokenSource + + // Additional metadata attached as headers. + quotaProject string + requestReason string +} + +// GetRequestMetadata gets the request metadata as a map from a grpcTokenSource. +func (ts grpcTokenSource) GetRequestMetadata(ctx context.Context, uri ...string) ( + map[string]string, error) { + metadata, err := ts.TokenSource.GetRequestMetadata(ctx, uri...) + if err != nil { + return nil, err + } + + // Attach system parameter + if ts.quotaProject != "" { + metadata["X-goog-user-project"] = ts.quotaProject + } + if ts.requestReason != "" { + metadata["X-goog-request-reason"] = ts.requestReason + } + return metadata, nil +} + +func isDirectPathEnabled(endpoint string, o *internal.DialSettings) bool { + if !o.EnableDirectPath { + return false + } + if !checkDirectPathEndPoint(endpoint) { + return false + } + if strings.EqualFold(os.Getenv(disableDirectPath), "true") { + return false + } + return true +} + +func isTokenSourceDirectPathCompatible(ts oauth2.TokenSource, o *internal.DialSettings) bool { + if ts == nil { + return false + } + tok, err := ts.Token() + if err != nil { + return false + } + if tok == nil { + return false + } + if o.AllowNonDefaultServiceAccount { + return true + } + if source, _ := tok.Extra("oauth2.google.tokenSource").(string); source != "compute-metadata" { + return false + } + if acct, _ := tok.Extra("oauth2.google.serviceAccount").(string); acct != "default" { + return false + } + return true +} + +func checkDirectPathEndPoint(endpoint string) bool { + // Only [dns:///]host[:port] is supported, not other schemes (e.g., "tcp://" or "unix://"). + // Also don't try direct path if the user has chosen an alternate name resolver + // (i.e., via ":///" prefix). + // + // TODO(cbro): once gRPC has introspectible options, check the user hasn't + // provided a custom dialer in gRPC options. 
+ if strings.Contains(endpoint, "://") && !strings.HasPrefix(endpoint, "dns:///") { + return false + } + + if endpoint == "" { + return false + } + + return true +} + +func processAndValidateOpts(opts []option.ClientOption) (*internal.DialSettings, error) { + var o internal.DialSettings + for _, opt := range opts { + opt.Apply(&o) + } + if err := o.Validate(); err != nil { + return nil, err + } + + return &o, nil +} + +type connPoolOption struct{ ConnPool } + +// WithConnPool returns a ClientOption that specifies the ConnPool +// connection to use as the basis of communications. +// +// This is only to be used by Google client libraries internally, for example +// when creating a longrunning API client that shares the same connection pool +// as a service client. +func WithConnPool(p ConnPool) option.ClientOption { + return connPoolOption{p} +} + +func (o connPoolOption) Apply(s *internal.DialSettings) { + s.GRPCConnPool = o.ConnPool +} diff --git a/vendor/google.golang.org/api/transport/grpc/dial_appengine.go b/vendor/google.golang.org/api/transport/grpc/dial_appengine.go new file mode 100644 index 00000000000..fd3dc0565d0 --- /dev/null +++ b/vendor/google.golang.org/api/transport/grpc/dial_appengine.go @@ -0,0 +1,32 @@ +// Copyright 2016 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build appengine +// +build appengine + +package grpc + +import ( + "context" + "net" + "time" + + "google.golang.org/appengine" + "google.golang.org/appengine/socket" + "google.golang.org/grpc" +) + +func init() { + // NOTE: dev_appserver doesn't currently support SSL. + // When it does, this code can be removed. + if appengine.IsDevAppServer() { + return + } + + appengineDialerHook = func(ctx context.Context) grpc.DialOption { + return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { + return socket.DialTimeout(ctx, "tcp", addr, timeout) + }) + } +} diff --git a/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go new file mode 100644 index 00000000000..507cd3ec63a --- /dev/null +++ b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go @@ -0,0 +1,52 @@ +// Copyright 2019 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.11 && linux +// +build go1.11,linux + +package grpc + +import ( + "context" + "net" + "syscall" + + "google.golang.org/grpc" +) + +const ( + // tcpUserTimeoutMilliseconds is the default value for the TCP_USER_TIMEOUT + // socket option: 20 seconds. + tcpUserTimeoutMilliseconds = 20000 + + // Copied from golang.org/x/sys/unix.TCP_USER_TIMEOUT. + tcpUserTimeoutOp = 0x12 +) + +func init() { + // timeoutDialerOption is a grpc.DialOption that contains dialer with + // socket option TCP_USER_TIMEOUT. This dialer requires go versions 1.11+.
+ timeoutDialerOption = grpc.WithContextDialer(dialTCPUserTimeout) +} + +func dialTCPUserTimeout(ctx context.Context, addr string) (net.Conn, error) { + control := func(network, address string, c syscall.RawConn) error { + var syscallErr error + controlErr := c.Control(func(fd uintptr) { + syscallErr = syscall.SetsockoptInt( + int(fd), syscall.IPPROTO_TCP, tcpUserTimeoutOp, tcpUserTimeoutMilliseconds) + }) + if syscallErr != nil { + return syscallErr + } + if controlErr != nil { + return controlErr + } + return nil + } + d := &net.Dialer{ + Control: control, + } + return d.DialContext(ctx, "tcp", addr) +} diff --git a/vendor/google.golang.org/api/transport/grpc/pool.go b/vendor/google.golang.org/api/transport/grpc/pool.go new file mode 100644 index 00000000000..4cf94a2771e --- /dev/null +++ b/vendor/google.golang.org/api/transport/grpc/pool.go @@ -0,0 +1,92 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package grpc + +import ( + "context" + "fmt" + "sync/atomic" + + "google.golang.org/api/internal" + "google.golang.org/grpc" +) + +// ConnPool is a pool of grpc.ClientConns. +type ConnPool = internal.ConnPool // NOTE(cbro): type alias to export the type. It must live in internal to avoid a circular dependency. + +var _ ConnPool = &roundRobinConnPool{} +var _ ConnPool = &singleConnPool{} + +// singleConnPool is a special case for a single connection. +type singleConnPool struct { + *grpc.ClientConn +} + +func (p *singleConnPool) Conn() *grpc.ClientConn { return p.ClientConn } +func (p *singleConnPool) Num() int { return 1 } + +type roundRobinConnPool struct { + conns []*grpc.ClientConn + + idx uint32 // access via sync/atomic +} + +func (p *roundRobinConnPool) Num() int { + return len(p.conns) +} + +func (p *roundRobinConnPool) Conn() *grpc.ClientConn { + i := atomic.AddUint32(&p.idx, 1) + return p.conns[i%uint32(len(p.conns))] +} + +func (p *roundRobinConnPool) Close() error { + var errs multiError + for _, conn := range p.conns { + if err := conn.Close(); err != nil { + errs = append(errs, err) + } + } + if len(errs) == 0 { + return nil + } + return errs +} + +func (p *roundRobinConnPool) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...grpc.CallOption) error { + return p.Conn().Invoke(ctx, method, args, reply, opts...) +} + +func (p *roundRobinConnPool) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) { + return p.Conn().NewStream(ctx, desc, method, opts...) +} + +// multiError represents errors from multiple conns in the group. +// +// TODO: figure out how and whether this is useful to export. End users should +// not be depending on the transport/grpc package directly, so there might need +// to be some service-specific multi-error type. +type multiError []error + +func (m multiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} diff --git a/vendor/google.golang.org/api/transport/internal/dca/dca.go b/vendor/google.golang.org/api/transport/internal/dca/dca.go new file mode 100644 index 00000000000..78004f0475f --- /dev/null +++ b/vendor/google.golang.org/api/transport/internal/dca/dca.go @@ -0,0 +1,143 @@ +// Copyright 2020 Google LLC. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package dca contains utils for implementing Device Certificate +// Authentication according to https://google.aip.dev/auth/4114 +// +// The overall logic for DCA is as follows: +// 1. If both endpoint override and client certificate are specified, use them as is. +// 2. If user does not specify client certificate, we will attempt to use default +// client certificate. +// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if +// client certificate is available and defaultEndpoint otherwise. +// +// Implications of the above logic: +// 1. If the user specifies a non-mTLS endpoint override but client certificate is +// available, we will pass along the cert anyway and let the server decide what to do. +// 2. If the user specifies an mTLS endpoint override but client certificate is not +// available, we will not fail-fast, but let backend throw error when connecting. +// +// We would like to avoid introducing client-side logic that parses whether the +// endpoint override is an mTLS url, since the url pattern may change at anytime. +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package dca + +import ( + "net/url" + "os" + "strings" + + "google.golang.org/api/internal" + "google.golang.org/api/transport/cert" +) + +const ( + mTLSModeAlways = "always" + mTLSModeNever = "never" + mTLSModeAuto = "auto" +) + +// GetClientCertificateSourceAndEndpoint is a convenience function that invokes +// getClientCertificateSource and getEndpoint sequentially and returns the client +// cert source and endpoint as a tuple. +func GetClientCertificateSourceAndEndpoint(settings *internal.DialSettings) (cert.Source, string, error) { + clientCertSource, err := getClientCertificateSource(settings) + if err != nil { + return nil, "", err + } + endpoint, err := getEndpoint(settings, clientCertSource) + if err != nil { + return nil, "", err + } + return clientCertSource, endpoint, nil +} + +// getClientCertificateSource returns a default client certificate source, if +// not provided by the user. +// +// A nil default source can be returned if the source does not exist. Any exceptions +// encountered while initializing the default source will be reported as client +// error (ex. corrupt metadata file). +// +// Important Note: For now, the environment variable GOOGLE_API_USE_CLIENT_CERTIFICATE +// must be set to "true" to allow certificate to be used (including user provided +// certificates). For details, see AIP-4114. +func getClientCertificateSource(settings *internal.DialSettings) (cert.Source, error) { + if !isClientCertificateEnabled() { + return nil, nil + } else if settings.ClientCertSource != nil { + return settings.ClientCertSource, nil + } else { + return cert.DefaultSource() + } +} + +func isClientCertificateEnabled() bool { + useClientCert := os.Getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE") + // TODO(andyrzhao): Update default to return "true" after DCA feature is fully released. + return strings.ToLower(useClientCert) == "true" +} + +// getEndpoint returns the endpoint for the service, taking into account the +// user-provided endpoint override "settings.Endpoint". +// +// If no endpoint override is specified, we will either return the default endpoint or +// the default mTLS endpoint if a client certificate is available. 
+// +// You can override the default endpoint choice (mtls vs. regular) by setting the +// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. +// +// If the endpoint override is an address (host:port) rather than full base +// URL (ex. https://...), then the user-provided address will be merged into +// the default endpoint. For example, WithEndpoint("myhost:8000") and +// WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8000/bar/baz" +func getEndpoint(settings *internal.DialSettings, clientCertSource cert.Source) (string, error) { + if settings.Endpoint == "" { + mtlsMode := getMTLSMode() + if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { + return settings.DefaultMTLSEndpoint, nil + } + return settings.DefaultEndpoint, nil + } + if strings.Contains(settings.Endpoint, "://") { + // User passed in a full URL path, use it verbatim. + return settings.Endpoint, nil + } + if settings.DefaultEndpoint == "" { + // If DefaultEndpoint is not configured, use the user provided endpoint verbatim. + // This allows a naked "host[:port]" URL to be used with GRPC Direct Path. + return settings.Endpoint, nil + } + + // Assume user-provided endpoint is host[:port], merge it with the default endpoint. + return mergeEndpoints(settings.DefaultEndpoint, settings.Endpoint) +} + +func getMTLSMode() string { + mode := os.Getenv("GOOGLE_API_USE_MTLS_ENDPOINT") + if mode == "" { + mode = os.Getenv("GOOGLE_API_USE_MTLS") // Deprecated. + } + if mode == "" { + return mTLSModeAuto + } + return strings.ToLower(mode) +} + +func mergeEndpoints(baseURL, newHost string) (string, error) { + u, err := url.Parse(fixScheme(baseURL)) + if err != nil { + return "", err + } + return strings.Replace(baseURL, u.Host, newHost, 1), nil +} + +func fixScheme(baseURL string) string { + if !strings.Contains(baseURL, "://") { + return "https://" + baseURL + } + return baseURL +} diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go new file mode 100644 index 00000000000..4ec872e4606 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go @@ -0,0 +1,2822 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google.golang.org/appengine/internal/socket/socket_service.proto + +package socket + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type RemoteSocketServiceError_ErrorCode int32 + +const ( + RemoteSocketServiceError_SYSTEM_ERROR RemoteSocketServiceError_ErrorCode = 1 + RemoteSocketServiceError_GAI_ERROR RemoteSocketServiceError_ErrorCode = 2 + RemoteSocketServiceError_FAILURE RemoteSocketServiceError_ErrorCode = 4 + RemoteSocketServiceError_PERMISSION_DENIED RemoteSocketServiceError_ErrorCode = 5 + RemoteSocketServiceError_INVALID_REQUEST RemoteSocketServiceError_ErrorCode = 6 + RemoteSocketServiceError_SOCKET_CLOSED RemoteSocketServiceError_ErrorCode = 7 +) + +var RemoteSocketServiceError_ErrorCode_name = map[int32]string{ + 1: "SYSTEM_ERROR", + 2: "GAI_ERROR", + 4: "FAILURE", + 5: "PERMISSION_DENIED", + 6: "INVALID_REQUEST", + 7: "SOCKET_CLOSED", +} +var RemoteSocketServiceError_ErrorCode_value = map[string]int32{ + "SYSTEM_ERROR": 1, + "GAI_ERROR": 2, + "FAILURE": 4, + "PERMISSION_DENIED": 5, + "INVALID_REQUEST": 6, + "SOCKET_CLOSED": 7, +} + +func (x RemoteSocketServiceError_ErrorCode) Enum() *RemoteSocketServiceError_ErrorCode { + p := new(RemoteSocketServiceError_ErrorCode) + *p = x + return p +} +func (x RemoteSocketServiceError_ErrorCode) String() string { + return proto.EnumName(RemoteSocketServiceError_ErrorCode_name, int32(x)) +} +func (x *RemoteSocketServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_ErrorCode_value, data, "RemoteSocketServiceError_ErrorCode") + if err != nil { + return err + } + *x = RemoteSocketServiceError_ErrorCode(value) + return nil +} +func (RemoteSocketServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 0} +} + +type RemoteSocketServiceError_SystemError int32 + +const ( + RemoteSocketServiceError_SYS_SUCCESS RemoteSocketServiceError_SystemError = 0 + RemoteSocketServiceError_SYS_EPERM RemoteSocketServiceError_SystemError = 1 + RemoteSocketServiceError_SYS_ENOENT RemoteSocketServiceError_SystemError = 2 + RemoteSocketServiceError_SYS_ESRCH RemoteSocketServiceError_SystemError = 3 + RemoteSocketServiceError_SYS_EINTR RemoteSocketServiceError_SystemError = 4 + RemoteSocketServiceError_SYS_EIO RemoteSocketServiceError_SystemError = 5 + RemoteSocketServiceError_SYS_ENXIO RemoteSocketServiceError_SystemError = 6 + RemoteSocketServiceError_SYS_E2BIG RemoteSocketServiceError_SystemError = 7 + RemoteSocketServiceError_SYS_ENOEXEC RemoteSocketServiceError_SystemError = 8 + RemoteSocketServiceError_SYS_EBADF RemoteSocketServiceError_SystemError = 9 + RemoteSocketServiceError_SYS_ECHILD RemoteSocketServiceError_SystemError = 10 + RemoteSocketServiceError_SYS_EAGAIN RemoteSocketServiceError_SystemError = 11 + RemoteSocketServiceError_SYS_EWOULDBLOCK RemoteSocketServiceError_SystemError = 11 + RemoteSocketServiceError_SYS_ENOMEM RemoteSocketServiceError_SystemError = 12 + RemoteSocketServiceError_SYS_EACCES RemoteSocketServiceError_SystemError = 13 + RemoteSocketServiceError_SYS_EFAULT RemoteSocketServiceError_SystemError = 14 + RemoteSocketServiceError_SYS_ENOTBLK RemoteSocketServiceError_SystemError = 15 + RemoteSocketServiceError_SYS_EBUSY RemoteSocketServiceError_SystemError = 16 + RemoteSocketServiceError_SYS_EEXIST RemoteSocketServiceError_SystemError = 17 + RemoteSocketServiceError_SYS_EXDEV RemoteSocketServiceError_SystemError = 18 + RemoteSocketServiceError_SYS_ENODEV RemoteSocketServiceError_SystemError = 19 + RemoteSocketServiceError_SYS_ENOTDIR 
RemoteSocketServiceError_SystemError = 20 + RemoteSocketServiceError_SYS_EISDIR RemoteSocketServiceError_SystemError = 21 + RemoteSocketServiceError_SYS_EINVAL RemoteSocketServiceError_SystemError = 22 + RemoteSocketServiceError_SYS_ENFILE RemoteSocketServiceError_SystemError = 23 + RemoteSocketServiceError_SYS_EMFILE RemoteSocketServiceError_SystemError = 24 + RemoteSocketServiceError_SYS_ENOTTY RemoteSocketServiceError_SystemError = 25 + RemoteSocketServiceError_SYS_ETXTBSY RemoteSocketServiceError_SystemError = 26 + RemoteSocketServiceError_SYS_EFBIG RemoteSocketServiceError_SystemError = 27 + RemoteSocketServiceError_SYS_ENOSPC RemoteSocketServiceError_SystemError = 28 + RemoteSocketServiceError_SYS_ESPIPE RemoteSocketServiceError_SystemError = 29 + RemoteSocketServiceError_SYS_EROFS RemoteSocketServiceError_SystemError = 30 + RemoteSocketServiceError_SYS_EMLINK RemoteSocketServiceError_SystemError = 31 + RemoteSocketServiceError_SYS_EPIPE RemoteSocketServiceError_SystemError = 32 + RemoteSocketServiceError_SYS_EDOM RemoteSocketServiceError_SystemError = 33 + RemoteSocketServiceError_SYS_ERANGE RemoteSocketServiceError_SystemError = 34 + RemoteSocketServiceError_SYS_EDEADLK RemoteSocketServiceError_SystemError = 35 + RemoteSocketServiceError_SYS_EDEADLOCK RemoteSocketServiceError_SystemError = 35 + RemoteSocketServiceError_SYS_ENAMETOOLONG RemoteSocketServiceError_SystemError = 36 + RemoteSocketServiceError_SYS_ENOLCK RemoteSocketServiceError_SystemError = 37 + RemoteSocketServiceError_SYS_ENOSYS RemoteSocketServiceError_SystemError = 38 + RemoteSocketServiceError_SYS_ENOTEMPTY RemoteSocketServiceError_SystemError = 39 + RemoteSocketServiceError_SYS_ELOOP RemoteSocketServiceError_SystemError = 40 + RemoteSocketServiceError_SYS_ENOMSG RemoteSocketServiceError_SystemError = 42 + RemoteSocketServiceError_SYS_EIDRM RemoteSocketServiceError_SystemError = 43 + RemoteSocketServiceError_SYS_ECHRNG RemoteSocketServiceError_SystemError = 44 + RemoteSocketServiceError_SYS_EL2NSYNC RemoteSocketServiceError_SystemError = 45 + RemoteSocketServiceError_SYS_EL3HLT RemoteSocketServiceError_SystemError = 46 + RemoteSocketServiceError_SYS_EL3RST RemoteSocketServiceError_SystemError = 47 + RemoteSocketServiceError_SYS_ELNRNG RemoteSocketServiceError_SystemError = 48 + RemoteSocketServiceError_SYS_EUNATCH RemoteSocketServiceError_SystemError = 49 + RemoteSocketServiceError_SYS_ENOCSI RemoteSocketServiceError_SystemError = 50 + RemoteSocketServiceError_SYS_EL2HLT RemoteSocketServiceError_SystemError = 51 + RemoteSocketServiceError_SYS_EBADE RemoteSocketServiceError_SystemError = 52 + RemoteSocketServiceError_SYS_EBADR RemoteSocketServiceError_SystemError = 53 + RemoteSocketServiceError_SYS_EXFULL RemoteSocketServiceError_SystemError = 54 + RemoteSocketServiceError_SYS_ENOANO RemoteSocketServiceError_SystemError = 55 + RemoteSocketServiceError_SYS_EBADRQC RemoteSocketServiceError_SystemError = 56 + RemoteSocketServiceError_SYS_EBADSLT RemoteSocketServiceError_SystemError = 57 + RemoteSocketServiceError_SYS_EBFONT RemoteSocketServiceError_SystemError = 59 + RemoteSocketServiceError_SYS_ENOSTR RemoteSocketServiceError_SystemError = 60 + RemoteSocketServiceError_SYS_ENODATA RemoteSocketServiceError_SystemError = 61 + RemoteSocketServiceError_SYS_ETIME RemoteSocketServiceError_SystemError = 62 + RemoteSocketServiceError_SYS_ENOSR RemoteSocketServiceError_SystemError = 63 + RemoteSocketServiceError_SYS_ENONET RemoteSocketServiceError_SystemError = 64 + RemoteSocketServiceError_SYS_ENOPKG 
RemoteSocketServiceError_SystemError = 65 + RemoteSocketServiceError_SYS_EREMOTE RemoteSocketServiceError_SystemError = 66 + RemoteSocketServiceError_SYS_ENOLINK RemoteSocketServiceError_SystemError = 67 + RemoteSocketServiceError_SYS_EADV RemoteSocketServiceError_SystemError = 68 + RemoteSocketServiceError_SYS_ESRMNT RemoteSocketServiceError_SystemError = 69 + RemoteSocketServiceError_SYS_ECOMM RemoteSocketServiceError_SystemError = 70 + RemoteSocketServiceError_SYS_EPROTO RemoteSocketServiceError_SystemError = 71 + RemoteSocketServiceError_SYS_EMULTIHOP RemoteSocketServiceError_SystemError = 72 + RemoteSocketServiceError_SYS_EDOTDOT RemoteSocketServiceError_SystemError = 73 + RemoteSocketServiceError_SYS_EBADMSG RemoteSocketServiceError_SystemError = 74 + RemoteSocketServiceError_SYS_EOVERFLOW RemoteSocketServiceError_SystemError = 75 + RemoteSocketServiceError_SYS_ENOTUNIQ RemoteSocketServiceError_SystemError = 76 + RemoteSocketServiceError_SYS_EBADFD RemoteSocketServiceError_SystemError = 77 + RemoteSocketServiceError_SYS_EREMCHG RemoteSocketServiceError_SystemError = 78 + RemoteSocketServiceError_SYS_ELIBACC RemoteSocketServiceError_SystemError = 79 + RemoteSocketServiceError_SYS_ELIBBAD RemoteSocketServiceError_SystemError = 80 + RemoteSocketServiceError_SYS_ELIBSCN RemoteSocketServiceError_SystemError = 81 + RemoteSocketServiceError_SYS_ELIBMAX RemoteSocketServiceError_SystemError = 82 + RemoteSocketServiceError_SYS_ELIBEXEC RemoteSocketServiceError_SystemError = 83 + RemoteSocketServiceError_SYS_EILSEQ RemoteSocketServiceError_SystemError = 84 + RemoteSocketServiceError_SYS_ERESTART RemoteSocketServiceError_SystemError = 85 + RemoteSocketServiceError_SYS_ESTRPIPE RemoteSocketServiceError_SystemError = 86 + RemoteSocketServiceError_SYS_EUSERS RemoteSocketServiceError_SystemError = 87 + RemoteSocketServiceError_SYS_ENOTSOCK RemoteSocketServiceError_SystemError = 88 + RemoteSocketServiceError_SYS_EDESTADDRREQ RemoteSocketServiceError_SystemError = 89 + RemoteSocketServiceError_SYS_EMSGSIZE RemoteSocketServiceError_SystemError = 90 + RemoteSocketServiceError_SYS_EPROTOTYPE RemoteSocketServiceError_SystemError = 91 + RemoteSocketServiceError_SYS_ENOPROTOOPT RemoteSocketServiceError_SystemError = 92 + RemoteSocketServiceError_SYS_EPROTONOSUPPORT RemoteSocketServiceError_SystemError = 93 + RemoteSocketServiceError_SYS_ESOCKTNOSUPPORT RemoteSocketServiceError_SystemError = 94 + RemoteSocketServiceError_SYS_EOPNOTSUPP RemoteSocketServiceError_SystemError = 95 + RemoteSocketServiceError_SYS_ENOTSUP RemoteSocketServiceError_SystemError = 95 + RemoteSocketServiceError_SYS_EPFNOSUPPORT RemoteSocketServiceError_SystemError = 96 + RemoteSocketServiceError_SYS_EAFNOSUPPORT RemoteSocketServiceError_SystemError = 97 + RemoteSocketServiceError_SYS_EADDRINUSE RemoteSocketServiceError_SystemError = 98 + RemoteSocketServiceError_SYS_EADDRNOTAVAIL RemoteSocketServiceError_SystemError = 99 + RemoteSocketServiceError_SYS_ENETDOWN RemoteSocketServiceError_SystemError = 100 + RemoteSocketServiceError_SYS_ENETUNREACH RemoteSocketServiceError_SystemError = 101 + RemoteSocketServiceError_SYS_ENETRESET RemoteSocketServiceError_SystemError = 102 + RemoteSocketServiceError_SYS_ECONNABORTED RemoteSocketServiceError_SystemError = 103 + RemoteSocketServiceError_SYS_ECONNRESET RemoteSocketServiceError_SystemError = 104 + RemoteSocketServiceError_SYS_ENOBUFS RemoteSocketServiceError_SystemError = 105 + RemoteSocketServiceError_SYS_EISCONN RemoteSocketServiceError_SystemError = 106 + 
RemoteSocketServiceError_SYS_ENOTCONN RemoteSocketServiceError_SystemError = 107 + RemoteSocketServiceError_SYS_ESHUTDOWN RemoteSocketServiceError_SystemError = 108 + RemoteSocketServiceError_SYS_ETOOMANYREFS RemoteSocketServiceError_SystemError = 109 + RemoteSocketServiceError_SYS_ETIMEDOUT RemoteSocketServiceError_SystemError = 110 + RemoteSocketServiceError_SYS_ECONNREFUSED RemoteSocketServiceError_SystemError = 111 + RemoteSocketServiceError_SYS_EHOSTDOWN RemoteSocketServiceError_SystemError = 112 + RemoteSocketServiceError_SYS_EHOSTUNREACH RemoteSocketServiceError_SystemError = 113 + RemoteSocketServiceError_SYS_EALREADY RemoteSocketServiceError_SystemError = 114 + RemoteSocketServiceError_SYS_EINPROGRESS RemoteSocketServiceError_SystemError = 115 + RemoteSocketServiceError_SYS_ESTALE RemoteSocketServiceError_SystemError = 116 + RemoteSocketServiceError_SYS_EUCLEAN RemoteSocketServiceError_SystemError = 117 + RemoteSocketServiceError_SYS_ENOTNAM RemoteSocketServiceError_SystemError = 118 + RemoteSocketServiceError_SYS_ENAVAIL RemoteSocketServiceError_SystemError = 119 + RemoteSocketServiceError_SYS_EISNAM RemoteSocketServiceError_SystemError = 120 + RemoteSocketServiceError_SYS_EREMOTEIO RemoteSocketServiceError_SystemError = 121 + RemoteSocketServiceError_SYS_EDQUOT RemoteSocketServiceError_SystemError = 122 + RemoteSocketServiceError_SYS_ENOMEDIUM RemoteSocketServiceError_SystemError = 123 + RemoteSocketServiceError_SYS_EMEDIUMTYPE RemoteSocketServiceError_SystemError = 124 + RemoteSocketServiceError_SYS_ECANCELED RemoteSocketServiceError_SystemError = 125 + RemoteSocketServiceError_SYS_ENOKEY RemoteSocketServiceError_SystemError = 126 + RemoteSocketServiceError_SYS_EKEYEXPIRED RemoteSocketServiceError_SystemError = 127 + RemoteSocketServiceError_SYS_EKEYREVOKED RemoteSocketServiceError_SystemError = 128 + RemoteSocketServiceError_SYS_EKEYREJECTED RemoteSocketServiceError_SystemError = 129 + RemoteSocketServiceError_SYS_EOWNERDEAD RemoteSocketServiceError_SystemError = 130 + RemoteSocketServiceError_SYS_ENOTRECOVERABLE RemoteSocketServiceError_SystemError = 131 + RemoteSocketServiceError_SYS_ERFKILL RemoteSocketServiceError_SystemError = 132 +) + +var RemoteSocketServiceError_SystemError_name = map[int32]string{ + 0: "SYS_SUCCESS", + 1: "SYS_EPERM", + 2: "SYS_ENOENT", + 3: "SYS_ESRCH", + 4: "SYS_EINTR", + 5: "SYS_EIO", + 6: "SYS_ENXIO", + 7: "SYS_E2BIG", + 8: "SYS_ENOEXEC", + 9: "SYS_EBADF", + 10: "SYS_ECHILD", + 11: "SYS_EAGAIN", + // Duplicate value: 11: "SYS_EWOULDBLOCK", + 12: "SYS_ENOMEM", + 13: "SYS_EACCES", + 14: "SYS_EFAULT", + 15: "SYS_ENOTBLK", + 16: "SYS_EBUSY", + 17: "SYS_EEXIST", + 18: "SYS_EXDEV", + 19: "SYS_ENODEV", + 20: "SYS_ENOTDIR", + 21: "SYS_EISDIR", + 22: "SYS_EINVAL", + 23: "SYS_ENFILE", + 24: "SYS_EMFILE", + 25: "SYS_ENOTTY", + 26: "SYS_ETXTBSY", + 27: "SYS_EFBIG", + 28: "SYS_ENOSPC", + 29: "SYS_ESPIPE", + 30: "SYS_EROFS", + 31: "SYS_EMLINK", + 32: "SYS_EPIPE", + 33: "SYS_EDOM", + 34: "SYS_ERANGE", + 35: "SYS_EDEADLK", + // Duplicate value: 35: "SYS_EDEADLOCK", + 36: "SYS_ENAMETOOLONG", + 37: "SYS_ENOLCK", + 38: "SYS_ENOSYS", + 39: "SYS_ENOTEMPTY", + 40: "SYS_ELOOP", + 42: "SYS_ENOMSG", + 43: "SYS_EIDRM", + 44: "SYS_ECHRNG", + 45: "SYS_EL2NSYNC", + 46: "SYS_EL3HLT", + 47: "SYS_EL3RST", + 48: "SYS_ELNRNG", + 49: "SYS_EUNATCH", + 50: "SYS_ENOCSI", + 51: "SYS_EL2HLT", + 52: "SYS_EBADE", + 53: "SYS_EBADR", + 54: "SYS_EXFULL", + 55: "SYS_ENOANO", + 56: "SYS_EBADRQC", + 57: "SYS_EBADSLT", + 59: "SYS_EBFONT", + 60: "SYS_ENOSTR", + 61: "SYS_ENODATA", + 62: 
"SYS_ETIME", + 63: "SYS_ENOSR", + 64: "SYS_ENONET", + 65: "SYS_ENOPKG", + 66: "SYS_EREMOTE", + 67: "SYS_ENOLINK", + 68: "SYS_EADV", + 69: "SYS_ESRMNT", + 70: "SYS_ECOMM", + 71: "SYS_EPROTO", + 72: "SYS_EMULTIHOP", + 73: "SYS_EDOTDOT", + 74: "SYS_EBADMSG", + 75: "SYS_EOVERFLOW", + 76: "SYS_ENOTUNIQ", + 77: "SYS_EBADFD", + 78: "SYS_EREMCHG", + 79: "SYS_ELIBACC", + 80: "SYS_ELIBBAD", + 81: "SYS_ELIBSCN", + 82: "SYS_ELIBMAX", + 83: "SYS_ELIBEXEC", + 84: "SYS_EILSEQ", + 85: "SYS_ERESTART", + 86: "SYS_ESTRPIPE", + 87: "SYS_EUSERS", + 88: "SYS_ENOTSOCK", + 89: "SYS_EDESTADDRREQ", + 90: "SYS_EMSGSIZE", + 91: "SYS_EPROTOTYPE", + 92: "SYS_ENOPROTOOPT", + 93: "SYS_EPROTONOSUPPORT", + 94: "SYS_ESOCKTNOSUPPORT", + 95: "SYS_EOPNOTSUPP", + // Duplicate value: 95: "SYS_ENOTSUP", + 96: "SYS_EPFNOSUPPORT", + 97: "SYS_EAFNOSUPPORT", + 98: "SYS_EADDRINUSE", + 99: "SYS_EADDRNOTAVAIL", + 100: "SYS_ENETDOWN", + 101: "SYS_ENETUNREACH", + 102: "SYS_ENETRESET", + 103: "SYS_ECONNABORTED", + 104: "SYS_ECONNRESET", + 105: "SYS_ENOBUFS", + 106: "SYS_EISCONN", + 107: "SYS_ENOTCONN", + 108: "SYS_ESHUTDOWN", + 109: "SYS_ETOOMANYREFS", + 110: "SYS_ETIMEDOUT", + 111: "SYS_ECONNREFUSED", + 112: "SYS_EHOSTDOWN", + 113: "SYS_EHOSTUNREACH", + 114: "SYS_EALREADY", + 115: "SYS_EINPROGRESS", + 116: "SYS_ESTALE", + 117: "SYS_EUCLEAN", + 118: "SYS_ENOTNAM", + 119: "SYS_ENAVAIL", + 120: "SYS_EISNAM", + 121: "SYS_EREMOTEIO", + 122: "SYS_EDQUOT", + 123: "SYS_ENOMEDIUM", + 124: "SYS_EMEDIUMTYPE", + 125: "SYS_ECANCELED", + 126: "SYS_ENOKEY", + 127: "SYS_EKEYEXPIRED", + 128: "SYS_EKEYREVOKED", + 129: "SYS_EKEYREJECTED", + 130: "SYS_EOWNERDEAD", + 131: "SYS_ENOTRECOVERABLE", + 132: "SYS_ERFKILL", +} +var RemoteSocketServiceError_SystemError_value = map[string]int32{ + "SYS_SUCCESS": 0, + "SYS_EPERM": 1, + "SYS_ENOENT": 2, + "SYS_ESRCH": 3, + "SYS_EINTR": 4, + "SYS_EIO": 5, + "SYS_ENXIO": 6, + "SYS_E2BIG": 7, + "SYS_ENOEXEC": 8, + "SYS_EBADF": 9, + "SYS_ECHILD": 10, + "SYS_EAGAIN": 11, + "SYS_EWOULDBLOCK": 11, + "SYS_ENOMEM": 12, + "SYS_EACCES": 13, + "SYS_EFAULT": 14, + "SYS_ENOTBLK": 15, + "SYS_EBUSY": 16, + "SYS_EEXIST": 17, + "SYS_EXDEV": 18, + "SYS_ENODEV": 19, + "SYS_ENOTDIR": 20, + "SYS_EISDIR": 21, + "SYS_EINVAL": 22, + "SYS_ENFILE": 23, + "SYS_EMFILE": 24, + "SYS_ENOTTY": 25, + "SYS_ETXTBSY": 26, + "SYS_EFBIG": 27, + "SYS_ENOSPC": 28, + "SYS_ESPIPE": 29, + "SYS_EROFS": 30, + "SYS_EMLINK": 31, + "SYS_EPIPE": 32, + "SYS_EDOM": 33, + "SYS_ERANGE": 34, + "SYS_EDEADLK": 35, + "SYS_EDEADLOCK": 35, + "SYS_ENAMETOOLONG": 36, + "SYS_ENOLCK": 37, + "SYS_ENOSYS": 38, + "SYS_ENOTEMPTY": 39, + "SYS_ELOOP": 40, + "SYS_ENOMSG": 42, + "SYS_EIDRM": 43, + "SYS_ECHRNG": 44, + "SYS_EL2NSYNC": 45, + "SYS_EL3HLT": 46, + "SYS_EL3RST": 47, + "SYS_ELNRNG": 48, + "SYS_EUNATCH": 49, + "SYS_ENOCSI": 50, + "SYS_EL2HLT": 51, + "SYS_EBADE": 52, + "SYS_EBADR": 53, + "SYS_EXFULL": 54, + "SYS_ENOANO": 55, + "SYS_EBADRQC": 56, + "SYS_EBADSLT": 57, + "SYS_EBFONT": 59, + "SYS_ENOSTR": 60, + "SYS_ENODATA": 61, + "SYS_ETIME": 62, + "SYS_ENOSR": 63, + "SYS_ENONET": 64, + "SYS_ENOPKG": 65, + "SYS_EREMOTE": 66, + "SYS_ENOLINK": 67, + "SYS_EADV": 68, + "SYS_ESRMNT": 69, + "SYS_ECOMM": 70, + "SYS_EPROTO": 71, + "SYS_EMULTIHOP": 72, + "SYS_EDOTDOT": 73, + "SYS_EBADMSG": 74, + "SYS_EOVERFLOW": 75, + "SYS_ENOTUNIQ": 76, + "SYS_EBADFD": 77, + "SYS_EREMCHG": 78, + "SYS_ELIBACC": 79, + "SYS_ELIBBAD": 80, + "SYS_ELIBSCN": 81, + "SYS_ELIBMAX": 82, + "SYS_ELIBEXEC": 83, + "SYS_EILSEQ": 84, + "SYS_ERESTART": 85, + "SYS_ESTRPIPE": 86, + "SYS_EUSERS": 87, + "SYS_ENOTSOCK": 88, + 
"SYS_EDESTADDRREQ": 89, + "SYS_EMSGSIZE": 90, + "SYS_EPROTOTYPE": 91, + "SYS_ENOPROTOOPT": 92, + "SYS_EPROTONOSUPPORT": 93, + "SYS_ESOCKTNOSUPPORT": 94, + "SYS_EOPNOTSUPP": 95, + "SYS_ENOTSUP": 95, + "SYS_EPFNOSUPPORT": 96, + "SYS_EAFNOSUPPORT": 97, + "SYS_EADDRINUSE": 98, + "SYS_EADDRNOTAVAIL": 99, + "SYS_ENETDOWN": 100, + "SYS_ENETUNREACH": 101, + "SYS_ENETRESET": 102, + "SYS_ECONNABORTED": 103, + "SYS_ECONNRESET": 104, + "SYS_ENOBUFS": 105, + "SYS_EISCONN": 106, + "SYS_ENOTCONN": 107, + "SYS_ESHUTDOWN": 108, + "SYS_ETOOMANYREFS": 109, + "SYS_ETIMEDOUT": 110, + "SYS_ECONNREFUSED": 111, + "SYS_EHOSTDOWN": 112, + "SYS_EHOSTUNREACH": 113, + "SYS_EALREADY": 114, + "SYS_EINPROGRESS": 115, + "SYS_ESTALE": 116, + "SYS_EUCLEAN": 117, + "SYS_ENOTNAM": 118, + "SYS_ENAVAIL": 119, + "SYS_EISNAM": 120, + "SYS_EREMOTEIO": 121, + "SYS_EDQUOT": 122, + "SYS_ENOMEDIUM": 123, + "SYS_EMEDIUMTYPE": 124, + "SYS_ECANCELED": 125, + "SYS_ENOKEY": 126, + "SYS_EKEYEXPIRED": 127, + "SYS_EKEYREVOKED": 128, + "SYS_EKEYREJECTED": 129, + "SYS_EOWNERDEAD": 130, + "SYS_ENOTRECOVERABLE": 131, + "SYS_ERFKILL": 132, +} + +func (x RemoteSocketServiceError_SystemError) Enum() *RemoteSocketServiceError_SystemError { + p := new(RemoteSocketServiceError_SystemError) + *p = x + return p +} +func (x RemoteSocketServiceError_SystemError) String() string { + return proto.EnumName(RemoteSocketServiceError_SystemError_name, int32(x)) +} +func (x *RemoteSocketServiceError_SystemError) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_SystemError_value, data, "RemoteSocketServiceError_SystemError") + if err != nil { + return err + } + *x = RemoteSocketServiceError_SystemError(value) + return nil +} +func (RemoteSocketServiceError_SystemError) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 1} +} + +type CreateSocketRequest_SocketFamily int32 + +const ( + CreateSocketRequest_IPv4 CreateSocketRequest_SocketFamily = 1 + CreateSocketRequest_IPv6 CreateSocketRequest_SocketFamily = 2 +) + +var CreateSocketRequest_SocketFamily_name = map[int32]string{ + 1: "IPv4", + 2: "IPv6", +} +var CreateSocketRequest_SocketFamily_value = map[string]int32{ + "IPv4": 1, + "IPv6": 2, +} + +func (x CreateSocketRequest_SocketFamily) Enum() *CreateSocketRequest_SocketFamily { + p := new(CreateSocketRequest_SocketFamily) + *p = x + return p +} +func (x CreateSocketRequest_SocketFamily) String() string { + return proto.EnumName(CreateSocketRequest_SocketFamily_name, int32(x)) +} +func (x *CreateSocketRequest_SocketFamily) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketFamily_value, data, "CreateSocketRequest_SocketFamily") + if err != nil { + return err + } + *x = CreateSocketRequest_SocketFamily(value) + return nil +} +func (CreateSocketRequest_SocketFamily) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 0} +} + +type CreateSocketRequest_SocketProtocol int32 + +const ( + CreateSocketRequest_TCP CreateSocketRequest_SocketProtocol = 1 + CreateSocketRequest_UDP CreateSocketRequest_SocketProtocol = 2 +) + +var CreateSocketRequest_SocketProtocol_name = map[int32]string{ + 1: "TCP", + 2: "UDP", +} +var CreateSocketRequest_SocketProtocol_value = map[string]int32{ + "TCP": 1, + "UDP": 2, +} + +func (x CreateSocketRequest_SocketProtocol) Enum() *CreateSocketRequest_SocketProtocol { + p := new(CreateSocketRequest_SocketProtocol) + *p = x + return p +} +func (x 
CreateSocketRequest_SocketProtocol) String() string { + return proto.EnumName(CreateSocketRequest_SocketProtocol_name, int32(x)) +} +func (x *CreateSocketRequest_SocketProtocol) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketProtocol_value, data, "CreateSocketRequest_SocketProtocol") + if err != nil { + return err + } + *x = CreateSocketRequest_SocketProtocol(value) + return nil +} +func (CreateSocketRequest_SocketProtocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 1} +} + +type SocketOption_SocketOptionLevel int32 + +const ( + SocketOption_SOCKET_SOL_IP SocketOption_SocketOptionLevel = 0 + SocketOption_SOCKET_SOL_SOCKET SocketOption_SocketOptionLevel = 1 + SocketOption_SOCKET_SOL_TCP SocketOption_SocketOptionLevel = 6 + SocketOption_SOCKET_SOL_UDP SocketOption_SocketOptionLevel = 17 +) + +var SocketOption_SocketOptionLevel_name = map[int32]string{ + 0: "SOCKET_SOL_IP", + 1: "SOCKET_SOL_SOCKET", + 6: "SOCKET_SOL_TCP", + 17: "SOCKET_SOL_UDP", +} +var SocketOption_SocketOptionLevel_value = map[string]int32{ + "SOCKET_SOL_IP": 0, + "SOCKET_SOL_SOCKET": 1, + "SOCKET_SOL_TCP": 6, + "SOCKET_SOL_UDP": 17, +} + +func (x SocketOption_SocketOptionLevel) Enum() *SocketOption_SocketOptionLevel { + p := new(SocketOption_SocketOptionLevel) + *p = x + return p +} +func (x SocketOption_SocketOptionLevel) String() string { + return proto.EnumName(SocketOption_SocketOptionLevel_name, int32(x)) +} +func (x *SocketOption_SocketOptionLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionLevel_value, data, "SocketOption_SocketOptionLevel") + if err != nil { + return err + } + *x = SocketOption_SocketOptionLevel(value) + return nil +} +func (SocketOption_SocketOptionLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 0} +} + +type SocketOption_SocketOptionName int32 + +const ( + SocketOption_SOCKET_SO_DEBUG SocketOption_SocketOptionName = 1 + SocketOption_SOCKET_SO_REUSEADDR SocketOption_SocketOptionName = 2 + SocketOption_SOCKET_SO_TYPE SocketOption_SocketOptionName = 3 + SocketOption_SOCKET_SO_ERROR SocketOption_SocketOptionName = 4 + SocketOption_SOCKET_SO_DONTROUTE SocketOption_SocketOptionName = 5 + SocketOption_SOCKET_SO_BROADCAST SocketOption_SocketOptionName = 6 + SocketOption_SOCKET_SO_SNDBUF SocketOption_SocketOptionName = 7 + SocketOption_SOCKET_SO_RCVBUF SocketOption_SocketOptionName = 8 + SocketOption_SOCKET_SO_KEEPALIVE SocketOption_SocketOptionName = 9 + SocketOption_SOCKET_SO_OOBINLINE SocketOption_SocketOptionName = 10 + SocketOption_SOCKET_SO_LINGER SocketOption_SocketOptionName = 13 + SocketOption_SOCKET_SO_RCVTIMEO SocketOption_SocketOptionName = 20 + SocketOption_SOCKET_SO_SNDTIMEO SocketOption_SocketOptionName = 21 + SocketOption_SOCKET_IP_TOS SocketOption_SocketOptionName = 1 + SocketOption_SOCKET_IP_TTL SocketOption_SocketOptionName = 2 + SocketOption_SOCKET_IP_HDRINCL SocketOption_SocketOptionName = 3 + SocketOption_SOCKET_IP_OPTIONS SocketOption_SocketOptionName = 4 + SocketOption_SOCKET_TCP_NODELAY SocketOption_SocketOptionName = 1 + SocketOption_SOCKET_TCP_MAXSEG SocketOption_SocketOptionName = 2 + SocketOption_SOCKET_TCP_CORK SocketOption_SocketOptionName = 3 + SocketOption_SOCKET_TCP_KEEPIDLE SocketOption_SocketOptionName = 4 + SocketOption_SOCKET_TCP_KEEPINTVL SocketOption_SocketOptionName = 5 + SocketOption_SOCKET_TCP_KEEPCNT SocketOption_SocketOptionName = 6 + 
SocketOption_SOCKET_TCP_SYNCNT SocketOption_SocketOptionName = 7 + SocketOption_SOCKET_TCP_LINGER2 SocketOption_SocketOptionName = 8 + SocketOption_SOCKET_TCP_DEFER_ACCEPT SocketOption_SocketOptionName = 9 + SocketOption_SOCKET_TCP_WINDOW_CLAMP SocketOption_SocketOptionName = 10 + SocketOption_SOCKET_TCP_INFO SocketOption_SocketOptionName = 11 + SocketOption_SOCKET_TCP_QUICKACK SocketOption_SocketOptionName = 12 +) + +var SocketOption_SocketOptionName_name = map[int32]string{ + 1: "SOCKET_SO_DEBUG", + 2: "SOCKET_SO_REUSEADDR", + 3: "SOCKET_SO_TYPE", + 4: "SOCKET_SO_ERROR", + 5: "SOCKET_SO_DONTROUTE", + 6: "SOCKET_SO_BROADCAST", + 7: "SOCKET_SO_SNDBUF", + 8: "SOCKET_SO_RCVBUF", + 9: "SOCKET_SO_KEEPALIVE", + 10: "SOCKET_SO_OOBINLINE", + 13: "SOCKET_SO_LINGER", + 20: "SOCKET_SO_RCVTIMEO", + 21: "SOCKET_SO_SNDTIMEO", + // Duplicate value: 1: "SOCKET_IP_TOS", + // Duplicate value: 2: "SOCKET_IP_TTL", + // Duplicate value: 3: "SOCKET_IP_HDRINCL", + // Duplicate value: 4: "SOCKET_IP_OPTIONS", + // Duplicate value: 1: "SOCKET_TCP_NODELAY", + // Duplicate value: 2: "SOCKET_TCP_MAXSEG", + // Duplicate value: 3: "SOCKET_TCP_CORK", + // Duplicate value: 4: "SOCKET_TCP_KEEPIDLE", + // Duplicate value: 5: "SOCKET_TCP_KEEPINTVL", + // Duplicate value: 6: "SOCKET_TCP_KEEPCNT", + // Duplicate value: 7: "SOCKET_TCP_SYNCNT", + // Duplicate value: 8: "SOCKET_TCP_LINGER2", + // Duplicate value: 9: "SOCKET_TCP_DEFER_ACCEPT", + // Duplicate value: 10: "SOCKET_TCP_WINDOW_CLAMP", + 11: "SOCKET_TCP_INFO", + 12: "SOCKET_TCP_QUICKACK", +} +var SocketOption_SocketOptionName_value = map[string]int32{ + "SOCKET_SO_DEBUG": 1, + "SOCKET_SO_REUSEADDR": 2, + "SOCKET_SO_TYPE": 3, + "SOCKET_SO_ERROR": 4, + "SOCKET_SO_DONTROUTE": 5, + "SOCKET_SO_BROADCAST": 6, + "SOCKET_SO_SNDBUF": 7, + "SOCKET_SO_RCVBUF": 8, + "SOCKET_SO_KEEPALIVE": 9, + "SOCKET_SO_OOBINLINE": 10, + "SOCKET_SO_LINGER": 13, + "SOCKET_SO_RCVTIMEO": 20, + "SOCKET_SO_SNDTIMEO": 21, + "SOCKET_IP_TOS": 1, + "SOCKET_IP_TTL": 2, + "SOCKET_IP_HDRINCL": 3, + "SOCKET_IP_OPTIONS": 4, + "SOCKET_TCP_NODELAY": 1, + "SOCKET_TCP_MAXSEG": 2, + "SOCKET_TCP_CORK": 3, + "SOCKET_TCP_KEEPIDLE": 4, + "SOCKET_TCP_KEEPINTVL": 5, + "SOCKET_TCP_KEEPCNT": 6, + "SOCKET_TCP_SYNCNT": 7, + "SOCKET_TCP_LINGER2": 8, + "SOCKET_TCP_DEFER_ACCEPT": 9, + "SOCKET_TCP_WINDOW_CLAMP": 10, + "SOCKET_TCP_INFO": 11, + "SOCKET_TCP_QUICKACK": 12, +} + +func (x SocketOption_SocketOptionName) Enum() *SocketOption_SocketOptionName { + p := new(SocketOption_SocketOptionName) + *p = x + return p +} +func (x SocketOption_SocketOptionName) String() string { + return proto.EnumName(SocketOption_SocketOptionName_name, int32(x)) +} +func (x *SocketOption_SocketOptionName) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionName_value, data, "SocketOption_SocketOptionName") + if err != nil { + return err + } + *x = SocketOption_SocketOptionName(value) + return nil +} +func (SocketOption_SocketOptionName) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 1} +} + +type ShutDownRequest_How int32 + +const ( + ShutDownRequest_SOCKET_SHUT_RD ShutDownRequest_How = 1 + ShutDownRequest_SOCKET_SHUT_WR ShutDownRequest_How = 2 + ShutDownRequest_SOCKET_SHUT_RDWR ShutDownRequest_How = 3 +) + +var ShutDownRequest_How_name = map[int32]string{ + 1: "SOCKET_SHUT_RD", + 2: "SOCKET_SHUT_WR", + 3: "SOCKET_SHUT_RDWR", +} +var ShutDownRequest_How_value = map[string]int32{ + "SOCKET_SHUT_RD": 1, + "SOCKET_SHUT_WR": 2, + "SOCKET_SHUT_RDWR": 3, 
+} + +func (x ShutDownRequest_How) Enum() *ShutDownRequest_How { + p := new(ShutDownRequest_How) + *p = x + return p +} +func (x ShutDownRequest_How) String() string { + return proto.EnumName(ShutDownRequest_How_name, int32(x)) +} +func (x *ShutDownRequest_How) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ShutDownRequest_How_value, data, "ShutDownRequest_How") + if err != nil { + return err + } + *x = ShutDownRequest_How(value) + return nil +} +func (ShutDownRequest_How) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{21, 0} +} + +type ReceiveRequest_Flags int32 + +const ( + ReceiveRequest_MSG_OOB ReceiveRequest_Flags = 1 + ReceiveRequest_MSG_PEEK ReceiveRequest_Flags = 2 +) + +var ReceiveRequest_Flags_name = map[int32]string{ + 1: "MSG_OOB", + 2: "MSG_PEEK", +} +var ReceiveRequest_Flags_value = map[string]int32{ + "MSG_OOB": 1, + "MSG_PEEK": 2, +} + +func (x ReceiveRequest_Flags) Enum() *ReceiveRequest_Flags { + p := new(ReceiveRequest_Flags) + *p = x + return p +} +func (x ReceiveRequest_Flags) String() string { + return proto.EnumName(ReceiveRequest_Flags_name, int32(x)) +} +func (x *ReceiveRequest_Flags) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ReceiveRequest_Flags_value, data, "ReceiveRequest_Flags") + if err != nil { + return err + } + *x = ReceiveRequest_Flags(value) + return nil +} +func (ReceiveRequest_Flags) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{27, 0} +} + +type PollEvent_PollEventFlag int32 + +const ( + PollEvent_SOCKET_POLLNONE PollEvent_PollEventFlag = 0 + PollEvent_SOCKET_POLLIN PollEvent_PollEventFlag = 1 + PollEvent_SOCKET_POLLPRI PollEvent_PollEventFlag = 2 + PollEvent_SOCKET_POLLOUT PollEvent_PollEventFlag = 4 + PollEvent_SOCKET_POLLERR PollEvent_PollEventFlag = 8 + PollEvent_SOCKET_POLLHUP PollEvent_PollEventFlag = 16 + PollEvent_SOCKET_POLLNVAL PollEvent_PollEventFlag = 32 + PollEvent_SOCKET_POLLRDNORM PollEvent_PollEventFlag = 64 + PollEvent_SOCKET_POLLRDBAND PollEvent_PollEventFlag = 128 + PollEvent_SOCKET_POLLWRNORM PollEvent_PollEventFlag = 256 + PollEvent_SOCKET_POLLWRBAND PollEvent_PollEventFlag = 512 + PollEvent_SOCKET_POLLMSG PollEvent_PollEventFlag = 1024 + PollEvent_SOCKET_POLLREMOVE PollEvent_PollEventFlag = 4096 + PollEvent_SOCKET_POLLRDHUP PollEvent_PollEventFlag = 8192 +) + +var PollEvent_PollEventFlag_name = map[int32]string{ + 0: "SOCKET_POLLNONE", + 1: "SOCKET_POLLIN", + 2: "SOCKET_POLLPRI", + 4: "SOCKET_POLLOUT", + 8: "SOCKET_POLLERR", + 16: "SOCKET_POLLHUP", + 32: "SOCKET_POLLNVAL", + 64: "SOCKET_POLLRDNORM", + 128: "SOCKET_POLLRDBAND", + 256: "SOCKET_POLLWRNORM", + 512: "SOCKET_POLLWRBAND", + 1024: "SOCKET_POLLMSG", + 4096: "SOCKET_POLLREMOVE", + 8192: "SOCKET_POLLRDHUP", +} +var PollEvent_PollEventFlag_value = map[string]int32{ + "SOCKET_POLLNONE": 0, + "SOCKET_POLLIN": 1, + "SOCKET_POLLPRI": 2, + "SOCKET_POLLOUT": 4, + "SOCKET_POLLERR": 8, + "SOCKET_POLLHUP": 16, + "SOCKET_POLLNVAL": 32, + "SOCKET_POLLRDNORM": 64, + "SOCKET_POLLRDBAND": 128, + "SOCKET_POLLWRNORM": 256, + "SOCKET_POLLWRBAND": 512, + "SOCKET_POLLMSG": 1024, + "SOCKET_POLLREMOVE": 4096, + "SOCKET_POLLRDHUP": 8192, +} + +func (x PollEvent_PollEventFlag) Enum() *PollEvent_PollEventFlag { + p := new(PollEvent_PollEventFlag) + *p = x + return p +} +func (x PollEvent_PollEventFlag) String() string { + return proto.EnumName(PollEvent_PollEventFlag_name, int32(x)) +} +func (x *PollEvent_PollEventFlag) UnmarshalJSON(data 
[]byte) error { + value, err := proto.UnmarshalJSONEnum(PollEvent_PollEventFlag_value, data, "PollEvent_PollEventFlag") + if err != nil { + return err + } + *x = PollEvent_PollEventFlag(value) + return nil +} +func (PollEvent_PollEventFlag) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{29, 0} +} + +type ResolveReply_ErrorCode int32 + +const ( + ResolveReply_SOCKET_EAI_ADDRFAMILY ResolveReply_ErrorCode = 1 + ResolveReply_SOCKET_EAI_AGAIN ResolveReply_ErrorCode = 2 + ResolveReply_SOCKET_EAI_BADFLAGS ResolveReply_ErrorCode = 3 + ResolveReply_SOCKET_EAI_FAIL ResolveReply_ErrorCode = 4 + ResolveReply_SOCKET_EAI_FAMILY ResolveReply_ErrorCode = 5 + ResolveReply_SOCKET_EAI_MEMORY ResolveReply_ErrorCode = 6 + ResolveReply_SOCKET_EAI_NODATA ResolveReply_ErrorCode = 7 + ResolveReply_SOCKET_EAI_NONAME ResolveReply_ErrorCode = 8 + ResolveReply_SOCKET_EAI_SERVICE ResolveReply_ErrorCode = 9 + ResolveReply_SOCKET_EAI_SOCKTYPE ResolveReply_ErrorCode = 10 + ResolveReply_SOCKET_EAI_SYSTEM ResolveReply_ErrorCode = 11 + ResolveReply_SOCKET_EAI_BADHINTS ResolveReply_ErrorCode = 12 + ResolveReply_SOCKET_EAI_PROTOCOL ResolveReply_ErrorCode = 13 + ResolveReply_SOCKET_EAI_OVERFLOW ResolveReply_ErrorCode = 14 + ResolveReply_SOCKET_EAI_MAX ResolveReply_ErrorCode = 15 +) + +var ResolveReply_ErrorCode_name = map[int32]string{ + 1: "SOCKET_EAI_ADDRFAMILY", + 2: "SOCKET_EAI_AGAIN", + 3: "SOCKET_EAI_BADFLAGS", + 4: "SOCKET_EAI_FAIL", + 5: "SOCKET_EAI_FAMILY", + 6: "SOCKET_EAI_MEMORY", + 7: "SOCKET_EAI_NODATA", + 8: "SOCKET_EAI_NONAME", + 9: "SOCKET_EAI_SERVICE", + 10: "SOCKET_EAI_SOCKTYPE", + 11: "SOCKET_EAI_SYSTEM", + 12: "SOCKET_EAI_BADHINTS", + 13: "SOCKET_EAI_PROTOCOL", + 14: "SOCKET_EAI_OVERFLOW", + 15: "SOCKET_EAI_MAX", +} +var ResolveReply_ErrorCode_value = map[string]int32{ + "SOCKET_EAI_ADDRFAMILY": 1, + "SOCKET_EAI_AGAIN": 2, + "SOCKET_EAI_BADFLAGS": 3, + "SOCKET_EAI_FAIL": 4, + "SOCKET_EAI_FAMILY": 5, + "SOCKET_EAI_MEMORY": 6, + "SOCKET_EAI_NODATA": 7, + "SOCKET_EAI_NONAME": 8, + "SOCKET_EAI_SERVICE": 9, + "SOCKET_EAI_SOCKTYPE": 10, + "SOCKET_EAI_SYSTEM": 11, + "SOCKET_EAI_BADHINTS": 12, + "SOCKET_EAI_PROTOCOL": 13, + "SOCKET_EAI_OVERFLOW": 14, + "SOCKET_EAI_MAX": 15, +} + +func (x ResolveReply_ErrorCode) Enum() *ResolveReply_ErrorCode { + p := new(ResolveReply_ErrorCode) + *p = x + return p +} +func (x ResolveReply_ErrorCode) String() string { + return proto.EnumName(ResolveReply_ErrorCode_name, int32(x)) +} +func (x *ResolveReply_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ResolveReply_ErrorCode_value, data, "ResolveReply_ErrorCode") + if err != nil { + return err + } + *x = ResolveReply_ErrorCode(value) + return nil +} +func (ResolveReply_ErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{33, 0} +} + +type RemoteSocketServiceError struct { + SystemError *int32 `protobuf:"varint,1,opt,name=system_error,json=systemError,def=0" json:"system_error,omitempty"` + ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail,json=errorDetail" json:"error_detail,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemoteSocketServiceError) Reset() { *m = RemoteSocketServiceError{} } +func (m *RemoteSocketServiceError) String() string { return proto.CompactTextString(m) } +func (*RemoteSocketServiceError) ProtoMessage() {} +func (*RemoteSocketServiceError) Descriptor() ([]byte, 
[]int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{0} +} +func (m *RemoteSocketServiceError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RemoteSocketServiceError.Unmarshal(m, b) +} +func (m *RemoteSocketServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RemoteSocketServiceError.Marshal(b, m, deterministic) +} +func (dst *RemoteSocketServiceError) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoteSocketServiceError.Merge(dst, src) +} +func (m *RemoteSocketServiceError) XXX_Size() int { + return xxx_messageInfo_RemoteSocketServiceError.Size(m) +} +func (m *RemoteSocketServiceError) XXX_DiscardUnknown() { + xxx_messageInfo_RemoteSocketServiceError.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoteSocketServiceError proto.InternalMessageInfo + +const Default_RemoteSocketServiceError_SystemError int32 = 0 + +func (m *RemoteSocketServiceError) GetSystemError() int32 { + if m != nil && m.SystemError != nil { + return *m.SystemError + } + return Default_RemoteSocketServiceError_SystemError +} + +func (m *RemoteSocketServiceError) GetErrorDetail() string { + if m != nil && m.ErrorDetail != nil { + return *m.ErrorDetail + } + return "" +} + +type AddressPort struct { + Port *int32 `protobuf:"varint,1,req,name=port" json:"port,omitempty"` + PackedAddress []byte `protobuf:"bytes,2,opt,name=packed_address,json=packedAddress" json:"packed_address,omitempty"` + HostnameHint *string `protobuf:"bytes,3,opt,name=hostname_hint,json=hostnameHint" json:"hostname_hint,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddressPort) Reset() { *m = AddressPort{} } +func (m *AddressPort) String() string { return proto.CompactTextString(m) } +func (*AddressPort) ProtoMessage() {} +func (*AddressPort) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{1} +} +func (m *AddressPort) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddressPort.Unmarshal(m, b) +} +func (m *AddressPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddressPort.Marshal(b, m, deterministic) +} +func (dst *AddressPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddressPort.Merge(dst, src) +} +func (m *AddressPort) XXX_Size() int { + return xxx_messageInfo_AddressPort.Size(m) +} +func (m *AddressPort) XXX_DiscardUnknown() { + xxx_messageInfo_AddressPort.DiscardUnknown(m) +} + +var xxx_messageInfo_AddressPort proto.InternalMessageInfo + +func (m *AddressPort) GetPort() int32 { + if m != nil && m.Port != nil { + return *m.Port + } + return 0 +} + +func (m *AddressPort) GetPackedAddress() []byte { + if m != nil { + return m.PackedAddress + } + return nil +} + +func (m *AddressPort) GetHostnameHint() string { + if m != nil && m.HostnameHint != nil { + return *m.HostnameHint + } + return "" +} + +type CreateSocketRequest struct { + Family *CreateSocketRequest_SocketFamily `protobuf:"varint,1,req,name=family,enum=appengine.CreateSocketRequest_SocketFamily" json:"family,omitempty"` + Protocol *CreateSocketRequest_SocketProtocol `protobuf:"varint,2,req,name=protocol,enum=appengine.CreateSocketRequest_SocketProtocol" json:"protocol,omitempty"` + SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options,json=socketOptions" json:"socket_options,omitempty"` + ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" 
json:"proxy_external_ip,omitempty"` + ListenBacklog *int32 `protobuf:"varint,5,opt,name=listen_backlog,json=listenBacklog,def=0" json:"listen_backlog,omitempty"` + RemoteIp *AddressPort `protobuf:"bytes,6,opt,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` + AppId *string `protobuf:"bytes,9,opt,name=app_id,json=appId" json:"app_id,omitempty"` + ProjectId *int64 `protobuf:"varint,10,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSocketRequest) Reset() { *m = CreateSocketRequest{} } +func (m *CreateSocketRequest) String() string { return proto.CompactTextString(m) } +func (*CreateSocketRequest) ProtoMessage() {} +func (*CreateSocketRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{2} +} +func (m *CreateSocketRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSocketRequest.Unmarshal(m, b) +} +func (m *CreateSocketRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSocketRequest.Marshal(b, m, deterministic) +} +func (dst *CreateSocketRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSocketRequest.Merge(dst, src) +} +func (m *CreateSocketRequest) XXX_Size() int { + return xxx_messageInfo_CreateSocketRequest.Size(m) +} +func (m *CreateSocketRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSocketRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSocketRequest proto.InternalMessageInfo + +const Default_CreateSocketRequest_ListenBacklog int32 = 0 + +func (m *CreateSocketRequest) GetFamily() CreateSocketRequest_SocketFamily { + if m != nil && m.Family != nil { + return *m.Family + } + return CreateSocketRequest_IPv4 +} + +func (m *CreateSocketRequest) GetProtocol() CreateSocketRequest_SocketProtocol { + if m != nil && m.Protocol != nil { + return *m.Protocol + } + return CreateSocketRequest_TCP +} + +func (m *CreateSocketRequest) GetSocketOptions() []*SocketOption { + if m != nil { + return m.SocketOptions + } + return nil +} + +func (m *CreateSocketRequest) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +func (m *CreateSocketRequest) GetListenBacklog() int32 { + if m != nil && m.ListenBacklog != nil { + return *m.ListenBacklog + } + return Default_CreateSocketRequest_ListenBacklog +} + +func (m *CreateSocketRequest) GetRemoteIp() *AddressPort { + if m != nil { + return m.RemoteIp + } + return nil +} + +func (m *CreateSocketRequest) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *CreateSocketRequest) GetProjectId() int64 { + if m != nil && m.ProjectId != nil { + return *m.ProjectId + } + return 0 +} + +type CreateSocketReply struct { + SocketDescriptor *string `protobuf:"bytes,1,opt,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + ServerAddress *AddressPort `protobuf:"bytes,3,opt,name=server_address,json=serverAddress" json:"server_address,omitempty"` + ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSocketReply) Reset() { *m = CreateSocketReply{} } +func (m *CreateSocketReply) String() string { 
return proto.CompactTextString(m) } +func (*CreateSocketReply) ProtoMessage() {} +func (*CreateSocketReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{3} +} + +var extRange_CreateSocketReply = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*CreateSocketReply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_CreateSocketReply +} +func (m *CreateSocketReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSocketReply.Unmarshal(m, b) +} +func (m *CreateSocketReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSocketReply.Marshal(b, m, deterministic) +} +func (dst *CreateSocketReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSocketReply.Merge(dst, src) +} +func (m *CreateSocketReply) XXX_Size() int { + return xxx_messageInfo_CreateSocketReply.Size(m) +} +func (m *CreateSocketReply) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSocketReply.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSocketReply proto.InternalMessageInfo + +func (m *CreateSocketReply) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *CreateSocketReply) GetServerAddress() *AddressPort { + if m != nil { + return m.ServerAddress + } + return nil +} + +func (m *CreateSocketReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type BindRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + ProxyExternalIp *AddressPort `protobuf:"bytes,2,req,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BindRequest) Reset() { *m = BindRequest{} } +func (m *BindRequest) String() string { return proto.CompactTextString(m) } +func (*BindRequest) ProtoMessage() {} +func (*BindRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{4} +} +func (m *BindRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BindRequest.Unmarshal(m, b) +} +func (m *BindRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BindRequest.Marshal(b, m, deterministic) +} +func (dst *BindRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BindRequest.Merge(dst, src) +} +func (m *BindRequest) XXX_Size() int { + return xxx_messageInfo_BindRequest.Size(m) +} +func (m *BindRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BindRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BindRequest proto.InternalMessageInfo + +func (m *BindRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *BindRequest) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type BindReply struct { + ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BindReply) Reset() { *m = BindReply{} } +func (m *BindReply) String() string { return proto.CompactTextString(m) } +func (*BindReply) ProtoMessage() {} +func 
(*BindReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{5} +} +func (m *BindReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BindReply.Unmarshal(m, b) +} +func (m *BindReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BindReply.Marshal(b, m, deterministic) +} +func (dst *BindReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_BindReply.Merge(dst, src) +} +func (m *BindReply) XXX_Size() int { + return xxx_messageInfo_BindReply.Size(m) +} +func (m *BindReply) XXX_DiscardUnknown() { + xxx_messageInfo_BindReply.DiscardUnknown(m) +} + +var xxx_messageInfo_BindReply proto.InternalMessageInfo + +func (m *BindReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type GetSocketNameRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSocketNameRequest) Reset() { *m = GetSocketNameRequest{} } +func (m *GetSocketNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetSocketNameRequest) ProtoMessage() {} +func (*GetSocketNameRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{6} +} +func (m *GetSocketNameRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetSocketNameRequest.Unmarshal(m, b) +} +func (m *GetSocketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetSocketNameRequest.Marshal(b, m, deterministic) +} +func (dst *GetSocketNameRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSocketNameRequest.Merge(dst, src) +} +func (m *GetSocketNameRequest) XXX_Size() int { + return xxx_messageInfo_GetSocketNameRequest.Size(m) +} +func (m *GetSocketNameRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetSocketNameRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSocketNameRequest proto.InternalMessageInfo + +func (m *GetSocketNameRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +type GetSocketNameReply struct { + ProxyExternalIp *AddressPort `protobuf:"bytes,2,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSocketNameReply) Reset() { *m = GetSocketNameReply{} } +func (m *GetSocketNameReply) String() string { return proto.CompactTextString(m) } +func (*GetSocketNameReply) ProtoMessage() {} +func (*GetSocketNameReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{7} +} +func (m *GetSocketNameReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetSocketNameReply.Unmarshal(m, b) +} +func (m *GetSocketNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetSocketNameReply.Marshal(b, m, deterministic) +} +func (dst *GetSocketNameReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSocketNameReply.Merge(dst, src) +} +func (m *GetSocketNameReply) XXX_Size() int { + return xxx_messageInfo_GetSocketNameReply.Size(m) +} +func (m *GetSocketNameReply) XXX_DiscardUnknown() { + 
xxx_messageInfo_GetSocketNameReply.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSocketNameReply proto.InternalMessageInfo + +func (m *GetSocketNameReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type GetPeerNameRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPeerNameRequest) Reset() { *m = GetPeerNameRequest{} } +func (m *GetPeerNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetPeerNameRequest) ProtoMessage() {} +func (*GetPeerNameRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{8} +} +func (m *GetPeerNameRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPeerNameRequest.Unmarshal(m, b) +} +func (m *GetPeerNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPeerNameRequest.Marshal(b, m, deterministic) +} +func (dst *GetPeerNameRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPeerNameRequest.Merge(dst, src) +} +func (m *GetPeerNameRequest) XXX_Size() int { + return xxx_messageInfo_GetPeerNameRequest.Size(m) +} +func (m *GetPeerNameRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetPeerNameRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPeerNameRequest proto.InternalMessageInfo + +func (m *GetPeerNameRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +type GetPeerNameReply struct { + PeerIp *AddressPort `protobuf:"bytes,2,opt,name=peer_ip,json=peerIp" json:"peer_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPeerNameReply) Reset() { *m = GetPeerNameReply{} } +func (m *GetPeerNameReply) String() string { return proto.CompactTextString(m) } +func (*GetPeerNameReply) ProtoMessage() {} +func (*GetPeerNameReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{9} +} +func (m *GetPeerNameReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPeerNameReply.Unmarshal(m, b) +} +func (m *GetPeerNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPeerNameReply.Marshal(b, m, deterministic) +} +func (dst *GetPeerNameReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPeerNameReply.Merge(dst, src) +} +func (m *GetPeerNameReply) XXX_Size() int { + return xxx_messageInfo_GetPeerNameReply.Size(m) +} +func (m *GetPeerNameReply) XXX_DiscardUnknown() { + xxx_messageInfo_GetPeerNameReply.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPeerNameReply proto.InternalMessageInfo + +func (m *GetPeerNameReply) GetPeerIp() *AddressPort { + if m != nil { + return m.PeerIp + } + return nil +} + +type SocketOption struct { + Level *SocketOption_SocketOptionLevel `protobuf:"varint,1,req,name=level,enum=appengine.SocketOption_SocketOptionLevel" json:"level,omitempty"` + Option *SocketOption_SocketOptionName `protobuf:"varint,2,req,name=option,enum=appengine.SocketOption_SocketOptionName" json:"option,omitempty"` + Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache 
int32 `json:"-"` +} + +func (m *SocketOption) Reset() { *m = SocketOption{} } +func (m *SocketOption) String() string { return proto.CompactTextString(m) } +func (*SocketOption) ProtoMessage() {} +func (*SocketOption) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{10} +} +func (m *SocketOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SocketOption.Unmarshal(m, b) +} +func (m *SocketOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SocketOption.Marshal(b, m, deterministic) +} +func (dst *SocketOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_SocketOption.Merge(dst, src) +} +func (m *SocketOption) XXX_Size() int { + return xxx_messageInfo_SocketOption.Size(m) +} +func (m *SocketOption) XXX_DiscardUnknown() { + xxx_messageInfo_SocketOption.DiscardUnknown(m) +} + +var xxx_messageInfo_SocketOption proto.InternalMessageInfo + +func (m *SocketOption) GetLevel() SocketOption_SocketOptionLevel { + if m != nil && m.Level != nil { + return *m.Level + } + return SocketOption_SOCKET_SOL_IP +} + +func (m *SocketOption) GetOption() SocketOption_SocketOptionName { + if m != nil && m.Option != nil { + return *m.Option + } + return SocketOption_SOCKET_SO_DEBUG +} + +func (m *SocketOption) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type SetSocketOptionsRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetSocketOptionsRequest) Reset() { *m = SetSocketOptionsRequest{} } +func (m *SetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } +func (*SetSocketOptionsRequest) ProtoMessage() {} +func (*SetSocketOptionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{11} +} +func (m *SetSocketOptionsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetSocketOptionsRequest.Unmarshal(m, b) +} +func (m *SetSocketOptionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetSocketOptionsRequest.Marshal(b, m, deterministic) +} +func (dst *SetSocketOptionsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetSocketOptionsRequest.Merge(dst, src) +} +func (m *SetSocketOptionsRequest) XXX_Size() int { + return xxx_messageInfo_SetSocketOptionsRequest.Size(m) +} +func (m *SetSocketOptionsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetSocketOptionsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetSocketOptionsRequest proto.InternalMessageInfo + +func (m *SetSocketOptionsRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *SetSocketOptionsRequest) GetOptions() []*SocketOption { + if m != nil { + return m.Options + } + return nil +} + +type SetSocketOptionsReply struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetSocketOptionsReply) Reset() { *m = SetSocketOptionsReply{} } +func (m *SetSocketOptionsReply) String() string { return proto.CompactTextString(m) } +func (*SetSocketOptionsReply) ProtoMessage() {} +func (*SetSocketOptionsReply) Descriptor() 
([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{12} +} +func (m *SetSocketOptionsReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetSocketOptionsReply.Unmarshal(m, b) +} +func (m *SetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetSocketOptionsReply.Marshal(b, m, deterministic) +} +func (dst *SetSocketOptionsReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetSocketOptionsReply.Merge(dst, src) +} +func (m *SetSocketOptionsReply) XXX_Size() int { + return xxx_messageInfo_SetSocketOptionsReply.Size(m) +} +func (m *SetSocketOptionsReply) XXX_DiscardUnknown() { + xxx_messageInfo_SetSocketOptionsReply.DiscardUnknown(m) +} + +var xxx_messageInfo_SetSocketOptionsReply proto.InternalMessageInfo + +type GetSocketOptionsRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSocketOptionsRequest) Reset() { *m = GetSocketOptionsRequest{} } +func (m *GetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } +func (*GetSocketOptionsRequest) ProtoMessage() {} +func (*GetSocketOptionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{13} +} +func (m *GetSocketOptionsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetSocketOptionsRequest.Unmarshal(m, b) +} +func (m *GetSocketOptionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetSocketOptionsRequest.Marshal(b, m, deterministic) +} +func (dst *GetSocketOptionsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSocketOptionsRequest.Merge(dst, src) +} +func (m *GetSocketOptionsRequest) XXX_Size() int { + return xxx_messageInfo_GetSocketOptionsRequest.Size(m) +} +func (m *GetSocketOptionsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetSocketOptionsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSocketOptionsRequest proto.InternalMessageInfo + +func (m *GetSocketOptionsRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *GetSocketOptionsRequest) GetOptions() []*SocketOption { + if m != nil { + return m.Options + } + return nil +} + +type GetSocketOptionsReply struct { + Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSocketOptionsReply) Reset() { *m = GetSocketOptionsReply{} } +func (m *GetSocketOptionsReply) String() string { return proto.CompactTextString(m) } +func (*GetSocketOptionsReply) ProtoMessage() {} +func (*GetSocketOptionsReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{14} +} +func (m *GetSocketOptionsReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetSocketOptionsReply.Unmarshal(m, b) +} +func (m *GetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetSocketOptionsReply.Marshal(b, m, deterministic) +} +func (dst *GetSocketOptionsReply) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_GetSocketOptionsReply.Merge(dst, src) +} +func (m *GetSocketOptionsReply) XXX_Size() int { + return xxx_messageInfo_GetSocketOptionsReply.Size(m) +} +func (m *GetSocketOptionsReply) XXX_DiscardUnknown() { + xxx_messageInfo_GetSocketOptionsReply.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSocketOptionsReply proto.InternalMessageInfo + +func (m *GetSocketOptionsReply) GetOptions() []*SocketOption { + if m != nil { + return m.Options + } + return nil +} + +type ConnectRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + RemoteIp *AddressPort `protobuf:"bytes,2,req,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,3,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnectRequest) Reset() { *m = ConnectRequest{} } +func (m *ConnectRequest) String() string { return proto.CompactTextString(m) } +func (*ConnectRequest) ProtoMessage() {} +func (*ConnectRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{15} +} +func (m *ConnectRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConnectRequest.Unmarshal(m, b) +} +func (m *ConnectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConnectRequest.Marshal(b, m, deterministic) +} +func (dst *ConnectRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnectRequest.Merge(dst, src) +} +func (m *ConnectRequest) XXX_Size() int { + return xxx_messageInfo_ConnectRequest.Size(m) +} +func (m *ConnectRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ConnectRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnectRequest proto.InternalMessageInfo + +const Default_ConnectRequest_TimeoutSeconds float64 = -1 + +func (m *ConnectRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ConnectRequest) GetRemoteIp() *AddressPort { + if m != nil { + return m.RemoteIp + } + return nil +} + +func (m *ConnectRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_ConnectRequest_TimeoutSeconds +} + +type ConnectReply struct { + ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnectReply) Reset() { *m = ConnectReply{} } +func (m *ConnectReply) String() string { return proto.CompactTextString(m) } +func (*ConnectReply) ProtoMessage() {} +func (*ConnectReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{16} +} + +var extRange_ConnectReply = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ConnectReply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ConnectReply +} +func (m *ConnectReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConnectReply.Unmarshal(m, b) +} +func (m *ConnectReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConnectReply.Marshal(b, m, deterministic) +} +func (dst 
*ConnectReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnectReply.Merge(dst, src) +} +func (m *ConnectReply) XXX_Size() int { + return xxx_messageInfo_ConnectReply.Size(m) +} +func (m *ConnectReply) XXX_DiscardUnknown() { + xxx_messageInfo_ConnectReply.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnectReply proto.InternalMessageInfo + +func (m *ConnectReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type ListenRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + Backlog *int32 `protobuf:"varint,2,req,name=backlog" json:"backlog,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListenRequest) Reset() { *m = ListenRequest{} } +func (m *ListenRequest) String() string { return proto.CompactTextString(m) } +func (*ListenRequest) ProtoMessage() {} +func (*ListenRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{17} +} +func (m *ListenRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListenRequest.Unmarshal(m, b) +} +func (m *ListenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListenRequest.Marshal(b, m, deterministic) +} +func (dst *ListenRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListenRequest.Merge(dst, src) +} +func (m *ListenRequest) XXX_Size() int { + return xxx_messageInfo_ListenRequest.Size(m) +} +func (m *ListenRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListenRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListenRequest proto.InternalMessageInfo + +func (m *ListenRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ListenRequest) GetBacklog() int32 { + if m != nil && m.Backlog != nil { + return *m.Backlog + } + return 0 +} + +type ListenReply struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListenReply) Reset() { *m = ListenReply{} } +func (m *ListenReply) String() string { return proto.CompactTextString(m) } +func (*ListenReply) ProtoMessage() {} +func (*ListenReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{18} +} +func (m *ListenReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListenReply.Unmarshal(m, b) +} +func (m *ListenReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListenReply.Marshal(b, m, deterministic) +} +func (dst *ListenReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListenReply.Merge(dst, src) +} +func (m *ListenReply) XXX_Size() int { + return xxx_messageInfo_ListenReply.Size(m) +} +func (m *ListenReply) XXX_DiscardUnknown() { + xxx_messageInfo_ListenReply.DiscardUnknown(m) +} + +var xxx_messageInfo_ListenReply proto.InternalMessageInfo + +type AcceptRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AcceptRequest) Reset() { *m = 
AcceptRequest{} } +func (m *AcceptRequest) String() string { return proto.CompactTextString(m) } +func (*AcceptRequest) ProtoMessage() {} +func (*AcceptRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{19} +} +func (m *AcceptRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AcceptRequest.Unmarshal(m, b) +} +func (m *AcceptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AcceptRequest.Marshal(b, m, deterministic) +} +func (dst *AcceptRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AcceptRequest.Merge(dst, src) +} +func (m *AcceptRequest) XXX_Size() int { + return xxx_messageInfo_AcceptRequest.Size(m) +} +func (m *AcceptRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AcceptRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AcceptRequest proto.InternalMessageInfo + +const Default_AcceptRequest_TimeoutSeconds float64 = -1 + +func (m *AcceptRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *AcceptRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_AcceptRequest_TimeoutSeconds +} + +type AcceptReply struct { + NewSocketDescriptor []byte `protobuf:"bytes,2,opt,name=new_socket_descriptor,json=newSocketDescriptor" json:"new_socket_descriptor,omitempty"` + RemoteAddress *AddressPort `protobuf:"bytes,3,opt,name=remote_address,json=remoteAddress" json:"remote_address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AcceptReply) Reset() { *m = AcceptReply{} } +func (m *AcceptReply) String() string { return proto.CompactTextString(m) } +func (*AcceptReply) ProtoMessage() {} +func (*AcceptReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{20} +} +func (m *AcceptReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AcceptReply.Unmarshal(m, b) +} +func (m *AcceptReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AcceptReply.Marshal(b, m, deterministic) +} +func (dst *AcceptReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_AcceptReply.Merge(dst, src) +} +func (m *AcceptReply) XXX_Size() int { + return xxx_messageInfo_AcceptReply.Size(m) +} +func (m *AcceptReply) XXX_DiscardUnknown() { + xxx_messageInfo_AcceptReply.DiscardUnknown(m) +} + +var xxx_messageInfo_AcceptReply proto.InternalMessageInfo + +func (m *AcceptReply) GetNewSocketDescriptor() []byte { + if m != nil { + return m.NewSocketDescriptor + } + return nil +} + +func (m *AcceptReply) GetRemoteAddress() *AddressPort { + if m != nil { + return m.RemoteAddress + } + return nil +} + +type ShutDownRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + How *ShutDownRequest_How `protobuf:"varint,2,req,name=how,enum=appengine.ShutDownRequest_How" json:"how,omitempty"` + SendOffset *int64 `protobuf:"varint,3,req,name=send_offset,json=sendOffset" json:"send_offset,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShutDownRequest) Reset() { *m = ShutDownRequest{} } +func (m *ShutDownRequest) String() string { return proto.CompactTextString(m) } +func (*ShutDownRequest) ProtoMessage() {} 
+func (*ShutDownRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{21} +} +func (m *ShutDownRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ShutDownRequest.Unmarshal(m, b) +} +func (m *ShutDownRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ShutDownRequest.Marshal(b, m, deterministic) +} +func (dst *ShutDownRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShutDownRequest.Merge(dst, src) +} +func (m *ShutDownRequest) XXX_Size() int { + return xxx_messageInfo_ShutDownRequest.Size(m) +} +func (m *ShutDownRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ShutDownRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ShutDownRequest proto.InternalMessageInfo + +func (m *ShutDownRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ShutDownRequest) GetHow() ShutDownRequest_How { + if m != nil && m.How != nil { + return *m.How + } + return ShutDownRequest_SOCKET_SHUT_RD +} + +func (m *ShutDownRequest) GetSendOffset() int64 { + if m != nil && m.SendOffset != nil { + return *m.SendOffset + } + return 0 +} + +type ShutDownReply struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShutDownReply) Reset() { *m = ShutDownReply{} } +func (m *ShutDownReply) String() string { return proto.CompactTextString(m) } +func (*ShutDownReply) ProtoMessage() {} +func (*ShutDownReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{22} +} +func (m *ShutDownReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ShutDownReply.Unmarshal(m, b) +} +func (m *ShutDownReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ShutDownReply.Marshal(b, m, deterministic) +} +func (dst *ShutDownReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShutDownReply.Merge(dst, src) +} +func (m *ShutDownReply) XXX_Size() int { + return xxx_messageInfo_ShutDownReply.Size(m) +} +func (m *ShutDownReply) XXX_DiscardUnknown() { + xxx_messageInfo_ShutDownReply.DiscardUnknown(m) +} + +var xxx_messageInfo_ShutDownReply proto.InternalMessageInfo + +type CloseRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + SendOffset *int64 `protobuf:"varint,2,opt,name=send_offset,json=sendOffset,def=-1" json:"send_offset,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CloseRequest) Reset() { *m = CloseRequest{} } +func (m *CloseRequest) String() string { return proto.CompactTextString(m) } +func (*CloseRequest) ProtoMessage() {} +func (*CloseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{23} +} +func (m *CloseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CloseRequest.Unmarshal(m, b) +} +func (m *CloseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CloseRequest.Marshal(b, m, deterministic) +} +func (dst *CloseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloseRequest.Merge(dst, src) +} +func (m *CloseRequest) XXX_Size() int { + return xxx_messageInfo_CloseRequest.Size(m) +} +func (m *CloseRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_CloseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CloseRequest proto.InternalMessageInfo + +const Default_CloseRequest_SendOffset int64 = -1 + +func (m *CloseRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *CloseRequest) GetSendOffset() int64 { + if m != nil && m.SendOffset != nil { + return *m.SendOffset + } + return Default_CloseRequest_SendOffset +} + +type CloseReply struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CloseReply) Reset() { *m = CloseReply{} } +func (m *CloseReply) String() string { return proto.CompactTextString(m) } +func (*CloseReply) ProtoMessage() {} +func (*CloseReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{24} +} +func (m *CloseReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CloseReply.Unmarshal(m, b) +} +func (m *CloseReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CloseReply.Marshal(b, m, deterministic) +} +func (dst *CloseReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloseReply.Merge(dst, src) +} +func (m *CloseReply) XXX_Size() int { + return xxx_messageInfo_CloseReply.Size(m) +} +func (m *CloseReply) XXX_DiscardUnknown() { + xxx_messageInfo_CloseReply.DiscardUnknown(m) +} + +var xxx_messageInfo_CloseReply proto.InternalMessageInfo + +type SendRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + Data []byte `protobuf:"bytes,2,req,name=data" json:"data,omitempty"` + StreamOffset *int64 `protobuf:"varint,3,req,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"` + Flags *int32 `protobuf:"varint,4,opt,name=flags,def=0" json:"flags,omitempty"` + SendTo *AddressPort `protobuf:"bytes,5,opt,name=send_to,json=sendTo" json:"send_to,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,6,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SendRequest) Reset() { *m = SendRequest{} } +func (m *SendRequest) String() string { return proto.CompactTextString(m) } +func (*SendRequest) ProtoMessage() {} +func (*SendRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{25} +} +func (m *SendRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SendRequest.Unmarshal(m, b) +} +func (m *SendRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SendRequest.Marshal(b, m, deterministic) +} +func (dst *SendRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SendRequest.Merge(dst, src) +} +func (m *SendRequest) XXX_Size() int { + return xxx_messageInfo_SendRequest.Size(m) +} +func (m *SendRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SendRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SendRequest proto.InternalMessageInfo + +const Default_SendRequest_Flags int32 = 0 +const Default_SendRequest_TimeoutSeconds float64 = -1 + +func (m *SendRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *SendRequest) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m 
*SendRequest) GetStreamOffset() int64 { + if m != nil && m.StreamOffset != nil { + return *m.StreamOffset + } + return 0 +} + +func (m *SendRequest) GetFlags() int32 { + if m != nil && m.Flags != nil { + return *m.Flags + } + return Default_SendRequest_Flags +} + +func (m *SendRequest) GetSendTo() *AddressPort { + if m != nil { + return m.SendTo + } + return nil +} + +func (m *SendRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_SendRequest_TimeoutSeconds +} + +type SendReply struct { + DataSent *int32 `protobuf:"varint,1,opt,name=data_sent,json=dataSent" json:"data_sent,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SendReply) Reset() { *m = SendReply{} } +func (m *SendReply) String() string { return proto.CompactTextString(m) } +func (*SendReply) ProtoMessage() {} +func (*SendReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{26} +} +func (m *SendReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SendReply.Unmarshal(m, b) +} +func (m *SendReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SendReply.Marshal(b, m, deterministic) +} +func (dst *SendReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_SendReply.Merge(dst, src) +} +func (m *SendReply) XXX_Size() int { + return xxx_messageInfo_SendReply.Size(m) +} +func (m *SendReply) XXX_DiscardUnknown() { + xxx_messageInfo_SendReply.DiscardUnknown(m) +} + +var xxx_messageInfo_SendReply proto.InternalMessageInfo + +func (m *SendReply) GetDataSent() int32 { + if m != nil && m.DataSent != nil { + return *m.DataSent + } + return 0 +} + +type ReceiveRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + DataSize *int32 `protobuf:"varint,2,req,name=data_size,json=dataSize" json:"data_size,omitempty"` + Flags *int32 `protobuf:"varint,3,opt,name=flags,def=0" json:"flags,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,5,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReceiveRequest) Reset() { *m = ReceiveRequest{} } +func (m *ReceiveRequest) String() string { return proto.CompactTextString(m) } +func (*ReceiveRequest) ProtoMessage() {} +func (*ReceiveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{27} +} +func (m *ReceiveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReceiveRequest.Unmarshal(m, b) +} +func (m *ReceiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReceiveRequest.Marshal(b, m, deterministic) +} +func (dst *ReceiveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReceiveRequest.Merge(dst, src) +} +func (m *ReceiveRequest) XXX_Size() int { + return xxx_messageInfo_ReceiveRequest.Size(m) +} +func (m *ReceiveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReceiveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReceiveRequest proto.InternalMessageInfo + +const Default_ReceiveRequest_Flags int32 = 0 +const Default_ReceiveRequest_TimeoutSeconds float64 = -1 + +func (m *ReceiveRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return 
*m.SocketDescriptor + } + return "" +} + +func (m *ReceiveRequest) GetDataSize() int32 { + if m != nil && m.DataSize != nil { + return *m.DataSize + } + return 0 +} + +func (m *ReceiveRequest) GetFlags() int32 { + if m != nil && m.Flags != nil { + return *m.Flags + } + return Default_ReceiveRequest_Flags +} + +func (m *ReceiveRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_ReceiveRequest_TimeoutSeconds +} + +type ReceiveReply struct { + StreamOffset *int64 `protobuf:"varint,2,opt,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` + ReceivedFrom *AddressPort `protobuf:"bytes,4,opt,name=received_from,json=receivedFrom" json:"received_from,omitempty"` + BufferSize *int32 `protobuf:"varint,5,opt,name=buffer_size,json=bufferSize" json:"buffer_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReceiveReply) Reset() { *m = ReceiveReply{} } +func (m *ReceiveReply) String() string { return proto.CompactTextString(m) } +func (*ReceiveReply) ProtoMessage() {} +func (*ReceiveReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{28} +} +func (m *ReceiveReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReceiveReply.Unmarshal(m, b) +} +func (m *ReceiveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReceiveReply.Marshal(b, m, deterministic) +} +func (dst *ReceiveReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReceiveReply.Merge(dst, src) +} +func (m *ReceiveReply) XXX_Size() int { + return xxx_messageInfo_ReceiveReply.Size(m) +} +func (m *ReceiveReply) XXX_DiscardUnknown() { + xxx_messageInfo_ReceiveReply.DiscardUnknown(m) +} + +var xxx_messageInfo_ReceiveReply proto.InternalMessageInfo + +func (m *ReceiveReply) GetStreamOffset() int64 { + if m != nil && m.StreamOffset != nil { + return *m.StreamOffset + } + return 0 +} + +func (m *ReceiveReply) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *ReceiveReply) GetReceivedFrom() *AddressPort { + if m != nil { + return m.ReceivedFrom + } + return nil +} + +func (m *ReceiveReply) GetBufferSize() int32 { + if m != nil && m.BufferSize != nil { + return *m.BufferSize + } + return 0 +} + +type PollEvent struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + RequestedEvents *int32 `protobuf:"varint,2,req,name=requested_events,json=requestedEvents" json:"requested_events,omitempty"` + ObservedEvents *int32 `protobuf:"varint,3,req,name=observed_events,json=observedEvents" json:"observed_events,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PollEvent) Reset() { *m = PollEvent{} } +func (m *PollEvent) String() string { return proto.CompactTextString(m) } +func (*PollEvent) ProtoMessage() {} +func (*PollEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{29} +} +func (m *PollEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PollEvent.Unmarshal(m, b) +} +func (m *PollEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PollEvent.Marshal(b, m, deterministic) +} +func (dst *PollEvent) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_PollEvent.Merge(dst, src) +} +func (m *PollEvent) XXX_Size() int { + return xxx_messageInfo_PollEvent.Size(m) +} +func (m *PollEvent) XXX_DiscardUnknown() { + xxx_messageInfo_PollEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_PollEvent proto.InternalMessageInfo + +func (m *PollEvent) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *PollEvent) GetRequestedEvents() int32 { + if m != nil && m.RequestedEvents != nil { + return *m.RequestedEvents + } + return 0 +} + +func (m *PollEvent) GetObservedEvents() int32 { + if m != nil && m.ObservedEvents != nil { + return *m.ObservedEvents + } + return 0 +} + +type PollRequest struct { + Events []*PollEvent `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PollRequest) Reset() { *m = PollRequest{} } +func (m *PollRequest) String() string { return proto.CompactTextString(m) } +func (*PollRequest) ProtoMessage() {} +func (*PollRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{30} +} +func (m *PollRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PollRequest.Unmarshal(m, b) +} +func (m *PollRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PollRequest.Marshal(b, m, deterministic) +} +func (dst *PollRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PollRequest.Merge(dst, src) +} +func (m *PollRequest) XXX_Size() int { + return xxx_messageInfo_PollRequest.Size(m) +} +func (m *PollRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PollRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PollRequest proto.InternalMessageInfo + +const Default_PollRequest_TimeoutSeconds float64 = -1 + +func (m *PollRequest) GetEvents() []*PollEvent { + if m != nil { + return m.Events + } + return nil +} + +func (m *PollRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_PollRequest_TimeoutSeconds +} + +type PollReply struct { + Events []*PollEvent `protobuf:"bytes,2,rep,name=events" json:"events,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PollReply) Reset() { *m = PollReply{} } +func (m *PollReply) String() string { return proto.CompactTextString(m) } +func (*PollReply) ProtoMessage() {} +func (*PollReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{31} +} +func (m *PollReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PollReply.Unmarshal(m, b) +} +func (m *PollReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PollReply.Marshal(b, m, deterministic) +} +func (dst *PollReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_PollReply.Merge(dst, src) +} +func (m *PollReply) XXX_Size() int { + return xxx_messageInfo_PollReply.Size(m) +} +func (m *PollReply) XXX_DiscardUnknown() { + xxx_messageInfo_PollReply.DiscardUnknown(m) +} + +var xxx_messageInfo_PollReply proto.InternalMessageInfo + +func (m *PollReply) GetEvents() []*PollEvent { + if m != nil { + return m.Events + } + return nil 
+} + +type ResolveRequest struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + AddressFamilies []CreateSocketRequest_SocketFamily `protobuf:"varint,2,rep,name=address_families,json=addressFamilies,enum=appengine.CreateSocketRequest_SocketFamily" json:"address_families,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResolveRequest) Reset() { *m = ResolveRequest{} } +func (m *ResolveRequest) String() string { return proto.CompactTextString(m) } +func (*ResolveRequest) ProtoMessage() {} +func (*ResolveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{32} +} +func (m *ResolveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResolveRequest.Unmarshal(m, b) +} +func (m *ResolveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResolveRequest.Marshal(b, m, deterministic) +} +func (dst *ResolveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResolveRequest.Merge(dst, src) +} +func (m *ResolveRequest) XXX_Size() int { + return xxx_messageInfo_ResolveRequest.Size(m) +} +func (m *ResolveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ResolveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ResolveRequest proto.InternalMessageInfo + +func (m *ResolveRequest) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ResolveRequest) GetAddressFamilies() []CreateSocketRequest_SocketFamily { + if m != nil { + return m.AddressFamilies + } + return nil +} + +type ResolveReply struct { + PackedAddress [][]byte `protobuf:"bytes,2,rep,name=packed_address,json=packedAddress" json:"packed_address,omitempty"` + CanonicalName *string `protobuf:"bytes,3,opt,name=canonical_name,json=canonicalName" json:"canonical_name,omitempty"` + Aliases []string `protobuf:"bytes,4,rep,name=aliases" json:"aliases,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResolveReply) Reset() { *m = ResolveReply{} } +func (m *ResolveReply) String() string { return proto.CompactTextString(m) } +func (*ResolveReply) ProtoMessage() {} +func (*ResolveReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{33} +} +func (m *ResolveReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResolveReply.Unmarshal(m, b) +} +func (m *ResolveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResolveReply.Marshal(b, m, deterministic) +} +func (dst *ResolveReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResolveReply.Merge(dst, src) +} +func (m *ResolveReply) XXX_Size() int { + return xxx_messageInfo_ResolveReply.Size(m) +} +func (m *ResolveReply) XXX_DiscardUnknown() { + xxx_messageInfo_ResolveReply.DiscardUnknown(m) +} + +var xxx_messageInfo_ResolveReply proto.InternalMessageInfo + +func (m *ResolveReply) GetPackedAddress() [][]byte { + if m != nil { + return m.PackedAddress + } + return nil +} + +func (m *ResolveReply) GetCanonicalName() string { + if m != nil && m.CanonicalName != nil { + return *m.CanonicalName + } + return "" +} + +func (m *ResolveReply) GetAliases() []string { + if m != nil { + return m.Aliases + } + return nil +} + +func init() { + proto.RegisterType((*RemoteSocketServiceError)(nil), "appengine.RemoteSocketServiceError") + 
proto.RegisterType((*AddressPort)(nil), "appengine.AddressPort") + proto.RegisterType((*CreateSocketRequest)(nil), "appengine.CreateSocketRequest") + proto.RegisterType((*CreateSocketReply)(nil), "appengine.CreateSocketReply") + proto.RegisterType((*BindRequest)(nil), "appengine.BindRequest") + proto.RegisterType((*BindReply)(nil), "appengine.BindReply") + proto.RegisterType((*GetSocketNameRequest)(nil), "appengine.GetSocketNameRequest") + proto.RegisterType((*GetSocketNameReply)(nil), "appengine.GetSocketNameReply") + proto.RegisterType((*GetPeerNameRequest)(nil), "appengine.GetPeerNameRequest") + proto.RegisterType((*GetPeerNameReply)(nil), "appengine.GetPeerNameReply") + proto.RegisterType((*SocketOption)(nil), "appengine.SocketOption") + proto.RegisterType((*SetSocketOptionsRequest)(nil), "appengine.SetSocketOptionsRequest") + proto.RegisterType((*SetSocketOptionsReply)(nil), "appengine.SetSocketOptionsReply") + proto.RegisterType((*GetSocketOptionsRequest)(nil), "appengine.GetSocketOptionsRequest") + proto.RegisterType((*GetSocketOptionsReply)(nil), "appengine.GetSocketOptionsReply") + proto.RegisterType((*ConnectRequest)(nil), "appengine.ConnectRequest") + proto.RegisterType((*ConnectReply)(nil), "appengine.ConnectReply") + proto.RegisterType((*ListenRequest)(nil), "appengine.ListenRequest") + proto.RegisterType((*ListenReply)(nil), "appengine.ListenReply") + proto.RegisterType((*AcceptRequest)(nil), "appengine.AcceptRequest") + proto.RegisterType((*AcceptReply)(nil), "appengine.AcceptReply") + proto.RegisterType((*ShutDownRequest)(nil), "appengine.ShutDownRequest") + proto.RegisterType((*ShutDownReply)(nil), "appengine.ShutDownReply") + proto.RegisterType((*CloseRequest)(nil), "appengine.CloseRequest") + proto.RegisterType((*CloseReply)(nil), "appengine.CloseReply") + proto.RegisterType((*SendRequest)(nil), "appengine.SendRequest") + proto.RegisterType((*SendReply)(nil), "appengine.SendReply") + proto.RegisterType((*ReceiveRequest)(nil), "appengine.ReceiveRequest") + proto.RegisterType((*ReceiveReply)(nil), "appengine.ReceiveReply") + proto.RegisterType((*PollEvent)(nil), "appengine.PollEvent") + proto.RegisterType((*PollRequest)(nil), "appengine.PollRequest") + proto.RegisterType((*PollReply)(nil), "appengine.PollReply") + proto.RegisterType((*ResolveRequest)(nil), "appengine.ResolveRequest") + proto.RegisterType((*ResolveReply)(nil), "appengine.ResolveReply") +} + +func init() { + proto.RegisterFile("google.golang.org/appengine/internal/socket/socket_service.proto", fileDescriptor_socket_service_b5f8f233dc327808) +} + +var fileDescriptor_socket_service_b5f8f233dc327808 = []byte{ + // 3088 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0x5f, 0x77, 0xe3, 0xc6, + 0x75, 0x37, 0x48, 0xfd, 0xe3, 0x90, 0x94, 0xee, 0x62, 0xa5, 0x5d, 0x25, 0x6e, 0x12, 0x05, 0x8e, + 0x1b, 0x25, 0x8e, 0x77, 0x6d, 0x39, 0x4d, 0x9b, 0xa4, 0x49, 0x16, 0x04, 0x86, 0x24, 0x4c, 0x00, + 0x03, 0xcd, 0x0c, 0x25, 0xd1, 0x6d, 0x8a, 0xd0, 0x22, 0xa4, 0x65, 0x4c, 0x11, 0x0c, 0xc9, 0xdd, + 0xf5, 0xba, 0x69, 0xaa, 0xfe, 0x39, 0xfd, 0x12, 0x7d, 0xe8, 0x73, 0x3f, 0x43, 0x4f, 0x4f, 0x5f, + 0xfa, 0xec, 0xc7, 0x7e, 0x84, 0x9e, 0xbe, 0xb4, 0x9f, 0xa1, 0x67, 0x06, 0xe0, 0x60, 0xc8, 0xd5, + 0xae, 0x77, 0x75, 0x72, 0x4e, 0x9e, 0xa4, 0xfb, 0xbb, 0x77, 0xee, 0xff, 0x99, 0xb9, 0x03, 0xa2, + 0x47, 0x97, 0x69, 0x7a, 0x39, 0x4a, 0x1e, 0x5c, 0xa6, 0xa3, 0xfe, 0xf8, 0xf2, 0x41, 0x3a, 0xbd, + 0x7c, 0xd8, 0x9f, 0x4c, 0x92, 0xf1, 0xe5, 0x70, 0x9c, 0x3c, 0x1c, 0x8e, 0xe7, 0xc9, 
0x74, 0xdc, + 0x1f, 0x3d, 0x9c, 0xa5, 0xe7, 0x9f, 0x25, 0xf3, 0xfc, 0x4f, 0x3c, 0x4b, 0xa6, 0x4f, 0x87, 0xe7, + 0xc9, 0x83, 0xc9, 0x34, 0x9d, 0xa7, 0x66, 0x45, 0xc9, 0x5b, 0xff, 0xbc, 0x8b, 0xf6, 0x69, 0x72, + 0x95, 0xce, 0x13, 0x26, 0x25, 0x59, 0x26, 0x88, 0xa7, 0xd3, 0x74, 0x6a, 0x7e, 0x07, 0xd5, 0x66, + 0xcf, 0x67, 0xf3, 0xe4, 0x2a, 0x4e, 0x04, 0xbd, 0x6f, 0x1c, 0x18, 0x87, 0xeb, 0x3f, 0x31, 0x3e, + 0xa0, 0xd5, 0x0c, 0xce, 0xa4, 0xbe, 0x8d, 0x6a, 0x92, 0x1d, 0x0f, 0x92, 0x79, 0x7f, 0x38, 0xda, + 0x2f, 0x1d, 0x18, 0x87, 0x15, 0x5a, 0x95, 0x98, 0x2b, 0x21, 0xeb, 0x73, 0x54, 0x91, 0xb2, 0x4e, + 0x3a, 0x48, 0x4c, 0x40, 0x35, 0xd6, 0x63, 0x1c, 0x07, 0x31, 0xa6, 0x94, 0x50, 0x30, 0xcc, 0x3a, + 0xaa, 0xb4, 0x6c, 0x2f, 0x27, 0x4b, 0x66, 0x15, 0x6d, 0x36, 0x6d, 0xcf, 0xef, 0x52, 0x0c, 0x6b, + 0xe6, 0x1e, 0xba, 0x13, 0x61, 0x1a, 0x78, 0x8c, 0x79, 0x24, 0x8c, 0x5d, 0x1c, 0x7a, 0xd8, 0x85, + 0x75, 0xf3, 0x2e, 0xda, 0xf1, 0xc2, 0x13, 0xdb, 0xf7, 0xdc, 0x98, 0xe2, 0xe3, 0x2e, 0x66, 0x1c, + 0x36, 0xcc, 0x3b, 0xa8, 0xce, 0x88, 0xd3, 0xc1, 0x3c, 0x76, 0x7c, 0xc2, 0xb0, 0x0b, 0x9b, 0xd6, + 0xbf, 0x99, 0xa8, 0xca, 0x34, 0x67, 0x77, 0x50, 0x95, 0xf5, 0x58, 0xcc, 0xba, 0x8e, 0x83, 0x19, + 0x83, 0xb7, 0x84, 0x6d, 0x01, 0x60, 0x61, 0x04, 0x0c, 0x73, 0x1b, 0x21, 0x49, 0x86, 0x04, 0x87, + 0x1c, 0x4a, 0x8a, 0xcd, 0xa8, 0xd3, 0x86, 0xb2, 0x22, 0xbd, 0x90, 0x53, 0x58, 0x13, 0x9e, 0x66, + 0x24, 0x81, 0x75, 0xc5, 0x0b, 0xcf, 0x3c, 0x02, 0x1b, 0x8a, 0x3c, 0x6a, 0x78, 0x2d, 0xd8, 0x5c, + 0x18, 0x16, 0x8a, 0xcf, 0xb0, 0x03, 0x5b, 0x8a, 0xdf, 0xb0, 0xdd, 0x26, 0x54, 0x94, 0x61, 0xa7, + 0xed, 0xf9, 0x2e, 0x20, 0x45, 0xdb, 0x2d, 0xdb, 0x0b, 0xa1, 0x2a, 0x02, 0x96, 0xf4, 0x29, 0xe9, + 0xfa, 0x6e, 0xc3, 0x27, 0x4e, 0x07, 0xaa, 0x9a, 0xb7, 0x01, 0x0e, 0xa0, 0x56, 0x2c, 0x12, 0xd1, + 0x41, 0x5d, 0xd1, 0x4d, 0xbb, 0xeb, 0x73, 0xd8, 0xd6, 0x9c, 0xe0, 0x0d, 0xbf, 0x03, 0x3b, 0x85, + 0x13, 0x5d, 0xd6, 0x03, 0x50, 0xf2, 0xf8, 0xcc, 0x63, 0x1c, 0xee, 0x28, 0xf6, 0x99, 0x8b, 0x4f, + 0xc0, 0xd4, 0xcc, 0x09, 0xfa, 0xae, 0xae, 0xce, 0xf5, 0x28, 0xec, 0x2a, 0x01, 0x8f, 0x09, 0x7a, + 0xaf, 0xa0, 0x45, 0xa9, 0xe0, 0x5e, 0xa1, 0xa0, 0xe9, 0xf9, 0x18, 0xee, 0x2b, 0x3a, 0x90, 0xf4, + 0xbe, 0x66, 0x80, 0xf3, 0x1e, 0x7c, 0x4d, 0x19, 0xe0, 0x67, 0xbc, 0xc1, 0x7a, 0xf0, 0x75, 0xe5, + 0x50, 0x53, 0x24, 0xf5, 0x6d, 0x4d, 0x9e, 0x45, 0x0e, 0xfc, 0x91, 0xa2, 0x59, 0xe4, 0x45, 0x18, + 0xbe, 0xa1, 0xc4, 0x29, 0x69, 0x32, 0xf8, 0x66, 0x61, 0xce, 0xf7, 0xc2, 0x0e, 0x7c, 0xab, 0xa8, + 0xbd, 0x90, 0x3e, 0x30, 0x6b, 0x68, 0x4b, 0x92, 0x2e, 0x09, 0xe0, 0xdb, 0x4a, 0x98, 0xda, 0x61, + 0x0b, 0x83, 0xa5, 0x7c, 0x71, 0xb1, 0xed, 0xfa, 0x1d, 0x78, 0x47, 0x76, 0x9b, 0x02, 0x44, 0x3d, + 0xde, 0x31, 0x77, 0x11, 0x64, 0xfe, 0xd8, 0x01, 0xe6, 0x84, 0xf8, 0x24, 0x6c, 0xc1, 0x77, 0x34, + 0x2f, 0x7d, 0xa7, 0x03, 0xef, 0xea, 0x5e, 0xf7, 0x18, 0xfc, 0xb1, 0x52, 0x14, 0x12, 0x8e, 0x83, + 0x88, 0xf7, 0xe0, 0xbb, 0xca, 0x33, 0x9f, 0x90, 0x08, 0x0e, 0xf5, 0x3a, 0xb3, 0x16, 0x7c, 0xbf, + 0x68, 0x43, 0x97, 0x06, 0xf0, 0x9e, 0xd6, 0x3b, 0x34, 0x6c, 0xc1, 0x0f, 0xf2, 0x1d, 0x16, 0x63, + 0xff, 0x28, 0x64, 0xbd, 0xd0, 0x81, 0xf7, 0x95, 0x84, 0xff, 0x51, 0xdb, 0xe7, 0xf0, 0x40, 0xa3, + 0x29, 0xe3, 0xf0, 0xb0, 0xa0, 0x43, 0xa1, 0xe1, 0x03, 0x15, 0x6c, 0x37, 0xb4, 0xb9, 0xd3, 0x86, + 0x0f, 0x35, 0x0f, 0x1c, 0xe6, 0xc1, 0x51, 0xb1, 0xe0, 0x48, 0x28, 0xfc, 0x48, 0xef, 0x66, 0x0c, + 0x3f, 0xd4, 0x49, 0x0a, 0x7f, 0xa2, 0xa4, 0xcf, 0x9a, 0x5d, 0xdf, 0x87, 0x1f, 0x69, 0xda, 0xec, + 0x90, 0xc0, 0x9f, 0x2a, 0x73, 0x42, 0xfc, 0xd8, 0x81, 0x3f, 0xd3, 0x01, 0xe6, 0x73, 0xf8, 0xb1, + 0x5a, 0xd1, 
0x68, 0x92, 0x90, 0xc3, 0x4f, 0xf5, 0x1c, 0x72, 0x0a, 0x7f, 0xae, 0xb5, 0xa2, 0x6b, + 0x73, 0x1b, 0x7e, 0xa6, 0x3c, 0xe0, 0x5e, 0x80, 0xe1, 0xe7, 0xc5, 0xe6, 0x24, 0x8c, 0xc2, 0x2f, + 0xb4, 0xe5, 0x21, 0xe6, 0xf0, 0x48, 0xa3, 0xa3, 0x4e, 0x0b, 0x6c, 0xa5, 0x8e, 0xe2, 0x80, 0x70, + 0x0c, 0x0d, 0x4d, 0xbf, 0xec, 0x1d, 0x47, 0x35, 0x8b, 0xed, 0x9e, 0x80, 0x5b, 0x34, 0x1e, 0x0d, + 0x42, 0x0e, 0x58, 0x99, 0x73, 0x48, 0x10, 0x40, 0x53, 0xb1, 0x23, 0x4a, 0x38, 0x81, 0x96, 0xaa, + 0x78, 0xd0, 0xf5, 0xb9, 0xd7, 0x26, 0x11, 0xb4, 0x8b, 0xf6, 0x22, 0xdc, 0x25, 0x1c, 0x3c, 0x3d, + 0x05, 0xa2, 0xe8, 0x1f, 0xab, 0x45, 0xe4, 0x04, 0xd3, 0xa6, 0x4f, 0x4e, 0xa1, 0xa3, 0x0a, 0x1d, + 0x12, 0xde, 0x0d, 0xbd, 0x63, 0xf0, 0x8b, 0x3c, 0xd9, 0x6e, 0xd3, 0x85, 0x40, 0x0f, 0xc4, 0x69, + 0xb7, 0x20, 0x54, 0x80, 0xef, 0x35, 0x6c, 0xc7, 0x01, 0xa2, 0x03, 0x0d, 0xdb, 0x85, 0x48, 0x07, + 0x98, 0x13, 0xc2, 0xb1, 0x0e, 0x04, 0xf6, 0x19, 0xd0, 0xa2, 0xbf, 0xbc, 0x86, 0x3c, 0xcc, 0x58, + 0xb1, 0xd1, 0x7d, 0x86, 0x8f, 0x81, 0x2b, 0x09, 0x8a, 0x19, 0xb7, 0x29, 0x87, 0xae, 0x42, 0x18, + 0xa7, 0x72, 0xbb, 0x9d, 0xa8, 0x35, 0x5d, 0x86, 0x29, 0x83, 0x53, 0x3d, 0x18, 0x71, 0x8a, 0xc3, + 0x99, 0xda, 0x4e, 0xae, 0xd0, 0xe2, 0xba, 0x94, 0xe2, 0x63, 0xe8, 0x29, 0xb9, 0x80, 0xb5, 0x98, + 0xf7, 0x09, 0x86, 0x4f, 0x4c, 0x13, 0x6d, 0x17, 0xe9, 0xe5, 0xbd, 0x08, 0xc3, 0x5f, 0xa8, 0xf3, + 0x32, 0x24, 0x12, 0x25, 0x11, 0x87, 0xbf, 0x34, 0xef, 0xa3, 0xbb, 0x85, 0x60, 0x48, 0x58, 0x37, + 0x8a, 0x08, 0xe5, 0xf0, 0x4b, 0xc5, 0x10, 0x86, 0x79, 0xc1, 0xf8, 0x2b, 0xa5, 0x9a, 0x44, 0xc2, + 0xad, 0x6e, 0x14, 0x41, 0xac, 0x1f, 0x7b, 0xac, 0x2b, 0x80, 0x85, 0x9f, 0x51, 0xb3, 0x58, 0xfa, + 0x2b, 0x85, 0xda, 0x1a, 0xda, 0x57, 0x0a, 0x45, 0x3c, 0x5e, 0xd8, 0x65, 0x18, 0x3e, 0x15, 0x77, + 0x9c, 0xc2, 0x42, 0xc2, 0xed, 0x13, 0xdb, 0xf3, 0xe1, 0xbc, 0x48, 0x08, 0xe6, 0x2e, 0x39, 0x0d, + 0x61, 0x50, 0x04, 0x85, 0x79, 0x37, 0xa4, 0xd8, 0x76, 0xda, 0x90, 0x14, 0xc7, 0x07, 0xe6, 0x14, + 0x33, 0xcc, 0xe1, 0x42, 0x99, 0x76, 0x48, 0x18, 0xda, 0x0d, 0x42, 0x39, 0x76, 0xe1, 0x52, 0x99, + 0x16, 0x68, 0x26, 0xf9, 0x58, 0x8b, 0xa5, 0xd1, 0x6d, 0x32, 0x18, 0x2a, 0xc0, 0x63, 0x42, 0x0c, + 0x7e, 0xad, 0x97, 0x45, 0x22, 0x9f, 0x29, 0x83, 0xac, 0xdd, 0xcd, 0x1c, 0x1b, 0x29, 0x83, 0x9c, + 0x90, 0xc0, 0x0e, 0x7b, 0x14, 0x37, 0x19, 0x5c, 0x29, 0x41, 0xb1, 0x07, 0x5d, 0xd2, 0xe5, 0x30, + 0x5e, 0xf2, 0x8c, 0xe2, 0x66, 0x57, 0xdc, 0xd2, 0xa9, 0x12, 0x6c, 0x13, 0x96, 0x69, 0x9c, 0x28, + 0x41, 0x01, 0x2d, 0x62, 0xfd, 0x8d, 0x72, 0xc6, 0xf6, 0x29, 0xb6, 0xdd, 0x1e, 0x4c, 0x55, 0x4a, + 0xbc, 0x30, 0xa2, 0xa4, 0x45, 0xc5, 0xa5, 0x3e, 0x2b, 0xb6, 0x23, 0xb7, 0x7d, 0x0c, 0xf3, 0xe2, + 0x38, 0x73, 0x7c, 0x6c, 0x87, 0xf0, 0x44, 0x2f, 0x61, 0x68, 0x07, 0xf0, 0xb4, 0x00, 0xb2, 0xe4, + 0x3f, 0xd3, 0xae, 0x32, 0x21, 0xf0, 0xb9, 0x72, 0x31, 0x3b, 0x11, 0x3c, 0x02, 0xcf, 0x95, 0x88, + 0x7b, 0xdc, 0x25, 0x1c, 0xbe, 0xd0, 0xce, 0xf1, 0x00, 0xbb, 0x5e, 0x37, 0x80, 0xbf, 0x56, 0xde, + 0x65, 0x80, 0x6c, 0xcd, 0xdf, 0x2a, 0x39, 0xc7, 0x0e, 0x1d, 0xec, 0x63, 0x17, 0xfe, 0x46, 0x3b, + 0x7f, 0x3a, 0xb8, 0x07, 0xbf, 0x53, 0xeb, 0x3a, 0xb8, 0x87, 0xcf, 0x22, 0x8f, 0x62, 0x17, 0xfe, + 0xd6, 0xdc, 0x2d, 0x40, 0x8a, 0x4f, 0x48, 0x07, 0xbb, 0x70, 0x6d, 0x98, 0x7b, 0x79, 0xa2, 0x24, + 0xfa, 0x31, 0x76, 0x44, 0xad, 0xff, 0xce, 0x30, 0xef, 0x2e, 0x1a, 0xf7, 0x34, 0xc4, 0x54, 0x5c, + 0x51, 0xf0, 0xf7, 0x86, 0xb9, 0x9f, 0xb7, 0x79, 0x48, 0x38, 0xc5, 0x8e, 0x38, 0x48, 0xec, 0x86, + 0x8f, 0xe1, 0x1f, 0x0c, 0x13, 0x16, 0xe7, 0x44, 0xb3, 0xe3, 0xf9, 0x3e, 0xfc, 0xa3, 0xf1, 0xf5, + 0x12, 0x18, 0xd6, 0x15, 0xaa, 0xda, 
0x83, 0xc1, 0x34, 0x99, 0xcd, 0xa2, 0x74, 0x3a, 0x37, 0x4d, + 0xb4, 0x36, 0x49, 0xa7, 0xf3, 0x7d, 0xe3, 0xa0, 0x74, 0xb8, 0x4e, 0xe5, 0xff, 0xe6, 0xbb, 0x68, + 0x7b, 0xd2, 0x3f, 0xff, 0x2c, 0x19, 0xc4, 0xfd, 0x4c, 0x52, 0xce, 0x7f, 0x35, 0x5a, 0xcf, 0xd0, + 0x7c, 0xb9, 0xf9, 0x0e, 0xaa, 0x3f, 0x4e, 0x67, 0xf3, 0x71, 0xff, 0x2a, 0x89, 0x1f, 0x0f, 0xc7, + 0xf3, 0xfd, 0xb2, 0x9c, 0x12, 0x6b, 0x0b, 0xb0, 0x3d, 0x1c, 0xcf, 0xad, 0x7f, 0x5a, 0x43, 0x77, + 0x9d, 0x69, 0xd2, 0x5f, 0x0c, 0xa3, 0x34, 0xf9, 0xcd, 0x93, 0x64, 0x36, 0x37, 0x1d, 0xb4, 0x71, + 0xd1, 0xbf, 0x1a, 0x8e, 0x9e, 0x4b, 0xcb, 0xdb, 0x47, 0xef, 0x3d, 0x50, 0x03, 0xec, 0x83, 0x1b, + 0xe4, 0x1f, 0x64, 0x54, 0x53, 0x2e, 0xa1, 0xf9, 0x52, 0xd3, 0x43, 0x5b, 0x72, 0xfa, 0x3d, 0x4f, + 0xc5, 0x88, 0x2a, 0xd4, 0xbc, 0xff, 0x5a, 0x6a, 0xa2, 0x7c, 0x11, 0x55, 0xcb, 0xcd, 0x9f, 0xa3, + 0xed, 0x7c, 0xae, 0x4e, 0x27, 0xf3, 0x61, 0x3a, 0x9e, 0xed, 0x97, 0x0f, 0xca, 0x87, 0xd5, 0xa3, + 0xfb, 0x9a, 0xc2, 0x6c, 0x31, 0x91, 0x7c, 0x5a, 0x9f, 0x69, 0xd4, 0xcc, 0x6c, 0xa0, 0x3b, 0x93, + 0x69, 0xfa, 0xf9, 0xf3, 0x38, 0xf9, 0x3c, 0x9b, 0xd6, 0xe3, 0xe1, 0x64, 0x7f, 0xed, 0xc0, 0x38, + 0xac, 0x1e, 0xdd, 0xd3, 0x54, 0x68, 0xa9, 0xa7, 0x3b, 0x72, 0x01, 0xce, 0xe5, 0xbd, 0x89, 0x79, + 0x88, 0xb6, 0x47, 0xc3, 0xd9, 0x3c, 0x19, 0xc7, 0x9f, 0xf6, 0xcf, 0x3f, 0x1b, 0xa5, 0x97, 0xfb, + 0xeb, 0x8b, 0xe9, 0xbc, 0x9e, 0x31, 0x1a, 0x19, 0x6e, 0x7e, 0x84, 0x2a, 0x53, 0x39, 0xe1, 0x0b, + 0x2b, 0x1b, 0xaf, 0xb4, 0xb2, 0x95, 0x09, 0x7a, 0x13, 0x73, 0x0f, 0x6d, 0xf4, 0x27, 0x93, 0x78, + 0x38, 0xd8, 0xaf, 0xc8, 0x42, 0xad, 0xf7, 0x27, 0x13, 0x6f, 0x60, 0x7e, 0x03, 0xa1, 0xc9, 0x34, + 0xfd, 0x75, 0x72, 0x3e, 0x17, 0x2c, 0x74, 0x60, 0x1c, 0x96, 0x69, 0x25, 0x47, 0xbc, 0x81, 0x65, + 0xa1, 0x9a, 0x9e, 0x7b, 0x73, 0x0b, 0xad, 0x79, 0xd1, 0xd3, 0x1f, 0x82, 0x91, 0xff, 0xf7, 0x23, + 0x28, 0x59, 0x16, 0xda, 0x5e, 0x4e, 0xac, 0xb9, 0x89, 0xca, 0xdc, 0x89, 0xc0, 0x10, 0xff, 0x74, + 0xdd, 0x08, 0x4a, 0xd6, 0x97, 0x06, 0xba, 0xb3, 0x5c, 0x91, 0xc9, 0xe8, 0xb9, 0xf9, 0x1e, 0xba, + 0x93, 0xa7, 0x7d, 0x90, 0xcc, 0xce, 0xa7, 0xc3, 0xc9, 0x3c, 0x7f, 0x93, 0x54, 0x28, 0x64, 0x0c, + 0x57, 0xe1, 0xe6, 0xcf, 0xd0, 0xb6, 0x78, 0xf4, 0x24, 0x53, 0xd5, 0x97, 0xe5, 0x57, 0x86, 0x5e, + 0xcf, 0xa4, 0x17, 0xfd, 0xfa, 0x7b, 0x28, 0xd1, 0xf7, 0x2b, 0x5b, 0xff, 0xb3, 0x09, 0xd7, 0xd7, + 0xd7, 0xd7, 0x25, 0xeb, 0x77, 0xa8, 0xda, 0x18, 0x8e, 0x07, 0x8b, 0x86, 0x7e, 0x49, 0x24, 0xa5, + 0x1b, 0x23, 0xb9, 0xd1, 0x15, 0xd1, 0xc1, 0xaf, 0xef, 0x8a, 0x45, 0x50, 0x25, 0xb3, 0x2f, 0xf2, + 0x78, 0xa3, 0x42, 0xe3, 0x8d, 0x62, 0xb3, 0x1c, 0xb4, 0xdb, 0x4a, 0xe6, 0x59, 0x75, 0xc2, 0xfe, + 0x55, 0x72, 0x9b, 0xc8, 0xac, 0x33, 0x64, 0xae, 0x28, 0x79, 0xa9, 0x7b, 0xa5, 0x37, 0x73, 0xcf, + 0x96, 0x9a, 0xa3, 0x24, 0x99, 0xde, 0xda, 0x39, 0x07, 0xc1, 0x92, 0x0a, 0xe1, 0xda, 0x43, 0xb4, + 0x39, 0x49, 0x92, 0xe9, 0x57, 0x3b, 0xb4, 0x21, 0xc4, 0xbc, 0x89, 0xf5, 0xe5, 0xe6, 0x62, 0x47, + 0x64, 0x7b, 0xdf, 0xfc, 0x05, 0x5a, 0x1f, 0x25, 0x4f, 0x93, 0x51, 0x7e, 0x92, 0x7d, 0xef, 0x25, + 0x27, 0xc6, 0x12, 0xe1, 0x8b, 0x05, 0x34, 0x5b, 0x67, 0x3e, 0x42, 0x1b, 0xd9, 0xa1, 0x93, 0x1f, + 0x62, 0x87, 0xaf, 0xa3, 0x41, 0x46, 0x90, 0xaf, 0x33, 0x77, 0xd1, 0xfa, 0xd3, 0xfe, 0xe8, 0x49, + 0xb2, 0x5f, 0x3e, 0x28, 0x1d, 0xd6, 0x68, 0x46, 0x58, 0x09, 0xba, 0xf3, 0x82, 0x4d, 0xed, 0x41, + 0xcd, 0x88, 0x1f, 0x7b, 0x11, 0xbc, 0x25, 0x67, 0x95, 0x02, 0xca, 0xfe, 0x05, 0x43, 0xce, 0x16, + 0x05, 0x2c, 0xb6, 0xf3, 0xc6, 0x0a, 0x26, 0x76, 0xf6, 0x1d, 0xeb, 0xdf, 0xd7, 0x11, 0xac, 0x7a, + 0x26, 0x6f, 0xbb, 0x85, 0x60, 0xec, 0xe2, 0x46, 0xb7, 0x05, 
0x86, 0x1c, 0xc9, 0x14, 0x48, 0xc5, + 0x94, 0x28, 0xc6, 0x23, 0x28, 0x2d, 0xa9, 0x8d, 0xe5, 0x95, 0x5a, 0x5e, 0xd6, 0x90, 0x7d, 0x47, + 0x58, 0x5b, 0xd6, 0xe0, 0x92, 0x90, 0x53, 0xd2, 0xe5, 0x18, 0xd6, 0x97, 0x19, 0x0d, 0x4a, 0x6c, + 0xd7, 0xb1, 0xe5, 0x07, 0x04, 0x31, 0x74, 0x28, 0x06, 0x0b, 0xdd, 0x46, 0xb7, 0x09, 0x9b, 0xcb, + 0x28, 0x75, 0x4e, 0x04, 0xba, 0xb5, 0xac, 0xa4, 0x83, 0x71, 0x64, 0xfb, 0xde, 0x09, 0x86, 0xca, + 0x32, 0x83, 0x90, 0x86, 0x17, 0xfa, 0x5e, 0x88, 0x01, 0x2d, 0xeb, 0xf1, 0xbd, 0xb0, 0x85, 0x29, + 0xd4, 0xcd, 0x7b, 0xc8, 0x5c, 0xd2, 0x2e, 0x86, 0x25, 0x02, 0xbb, 0xcb, 0x38, 0x0b, 0xdd, 0x0c, + 0xdf, 0xd3, 0x6a, 0xe2, 0x45, 0x31, 0x27, 0x0c, 0x8c, 0x15, 0x88, 0xfb, 0x50, 0xd2, 0xca, 0xe4, + 0x45, 0x71, 0x5b, 0x8c, 0x9a, 0x8e, 0x0f, 0xe5, 0x65, 0x98, 0x44, 0xdc, 0x23, 0x21, 0x83, 0x35, + 0xcd, 0x16, 0x77, 0xa2, 0x58, 0x3c, 0xef, 0x7d, 0xbb, 0x07, 0x86, 0x26, 0x2e, 0xf0, 0xc0, 0x3e, + 0x63, 0xb8, 0x05, 0x25, 0x2d, 0xdb, 0x02, 0x76, 0x08, 0xed, 0x40, 0x59, 0x0b, 0x5b, 0x80, 0x22, + 0x21, 0x9e, 0xeb, 0x63, 0x58, 0x33, 0xf7, 0xd1, 0xee, 0x2a, 0x23, 0xe4, 0x27, 0x3e, 0xac, 0xaf, + 0x98, 0x15, 0x1c, 0x27, 0x14, 0x65, 0x58, 0x36, 0x2b, 0x9e, 0xb0, 0x21, 0x87, 0xcd, 0x15, 0xf1, + 0x2c, 0x81, 0x47, 0xb0, 0x65, 0xbe, 0x8d, 0xee, 0x6b, 0xb8, 0x8b, 0x9b, 0x98, 0xc6, 0xb6, 0xe3, + 0xe0, 0x88, 0x43, 0x65, 0x85, 0x79, 0xea, 0x85, 0x2e, 0x39, 0x8d, 0x1d, 0xdf, 0x0e, 0x22, 0x40, + 0x2b, 0x81, 0x78, 0x61, 0x93, 0x40, 0x75, 0x25, 0x90, 0xe3, 0xae, 0xe7, 0x74, 0x6c, 0xa7, 0x03, + 0x35, 0x39, 0x11, 0x3d, 0x47, 0xf7, 0xd9, 0xe2, 0xc8, 0xca, 0xaf, 0xf3, 0x5b, 0x1d, 0xea, 0x1f, + 0xa2, 0xcd, 0xc5, 0xec, 0x50, 0x7a, 0xf5, 0xec, 0xb0, 0x90, 0xb3, 0xee, 0xa3, 0xbd, 0x17, 0x4d, + 0x4f, 0x46, 0xcf, 0x85, 0x4f, 0xad, 0x3f, 0x90, 0x4f, 0x1f, 0xa3, 0xbd, 0xd6, 0x4d, 0x3e, 0xdd, + 0x46, 0xd7, 0xbf, 0x18, 0x68, 0xdb, 0x49, 0xc7, 0xe3, 0xe4, 0x7c, 0x7e, 0x2b, 0xf7, 0x97, 0xe6, + 0x9c, 0x57, 0xdf, 0x8f, 0xc5, 0x9c, 0xf3, 0x1e, 0xda, 0x99, 0x0f, 0xaf, 0x92, 0xf4, 0xc9, 0x3c, + 0x9e, 0x25, 0xe7, 0xe9, 0x78, 0x90, 0xcd, 0x09, 0xc6, 0x4f, 0x4a, 0xef, 0x7f, 0x48, 0xb7, 0x73, + 0x16, 0xcb, 0x38, 0xd6, 0x2f, 0x51, 0x4d, 0x39, 0xf8, 0x7b, 0xba, 0x48, 0xf5, 0x21, 0xe1, 0x04, + 0xd5, 0x7d, 0x39, 0xb9, 0xdd, 0x2a, 0xfc, 0x7d, 0xb4, 0xb9, 0x98, 0x04, 0x4b, 0x72, 0x3e, 0x5f, + 0x90, 0x56, 0x1d, 0x55, 0x17, 0x7a, 0x45, 0xbb, 0x0c, 0x51, 0xdd, 0x3e, 0x3f, 0x4f, 0x26, 0xb7, + 0xcb, 0xf2, 0x0d, 0x09, 0x2b, 0xbd, 0x34, 0x61, 0xd7, 0x06, 0xaa, 0x2e, 0x6c, 0x89, 0x84, 0x1d, + 0xa1, 0xbd, 0x71, 0xf2, 0x2c, 0x7e, 0xd1, 0x5a, 0xf6, 0x66, 0xb8, 0x3b, 0x4e, 0x9e, 0xb1, 0x1b, + 0x06, 0xb9, 0xbc, 0xac, 0xaf, 0x39, 0xc8, 0x65, 0xd2, 0x39, 0x64, 0xfd, 0x97, 0x81, 0x76, 0xd8, + 0xe3, 0x27, 0x73, 0x37, 0x7d, 0x76, 0xbb, 0xbc, 0x7e, 0x80, 0xca, 0x8f, 0xd3, 0x67, 0xf9, 0x6d, + 0xfb, 0x4d, 0xbd, 0x8b, 0x97, 0xb5, 0x3e, 0x68, 0xa7, 0xcf, 0xa8, 0x10, 0x35, 0xbf, 0x85, 0xaa, + 0xb3, 0x64, 0x3c, 0x88, 0xd3, 0x8b, 0x8b, 0x59, 0x32, 0x97, 0xd7, 0x6c, 0x99, 0x22, 0x01, 0x11, + 0x89, 0x58, 0x0e, 0x2a, 0xb7, 0xd3, 0x67, 0xfa, 0x45, 0xd6, 0xee, 0xf2, 0x98, 0xba, 0xcb, 0xf7, + 0xa8, 0xc0, 0x4e, 0xc5, 0x85, 0xa7, 0xdd, 0x1b, 0x99, 0xdc, 0x29, 0x85, 0xb2, 0xb5, 0x83, 0xea, + 0x85, 0x07, 0xa2, 0xae, 0xbf, 0x42, 0x35, 0x67, 0x94, 0xce, 0x6e, 0x35, 0xed, 0x98, 0xef, 0x2c, + 0xfb, 0x2c, 0xea, 0x51, 0x96, 0x25, 0xd5, 0xfd, 0xae, 0x21, 0x94, 0x5b, 0x10, 0xf6, 0xfe, 0xcf, + 0x40, 0x55, 0x96, 0xdc, 0x72, 0xa8, 0xbd, 0x87, 0xd6, 0x06, 0xfd, 0x79, 0x5f, 0xa6, 0xb5, 0xd6, + 0x28, 0x6d, 0x19, 0x54, 0xd2, 0xe2, 0x9d, 0x38, 0x9b, 0x4f, 0x93, 0xfe, 0xd5, 0x72, 
0xf6, 0x6a, + 0x19, 0x98, 0xf9, 0x61, 0xde, 0x47, 0xeb, 0x17, 0xa3, 0xfe, 0xe5, 0x4c, 0x0e, 0xe4, 0xf2, 0xc9, + 0x93, 0xd1, 0x62, 0x3e, 0x93, 0x51, 0xcc, 0x53, 0xf9, 0x1a, 0x7a, 0xc5, 0x7c, 0x26, 0xc4, 0x78, + 0x7a, 0x53, 0x37, 0x6f, 0xbc, 0xb4, 0x9b, 0x0f, 0x51, 0x25, 0x8b, 0x57, 0xb4, 0xf2, 0xdb, 0xa8, + 0x22, 0x1c, 0x8e, 0x67, 0xc9, 0x78, 0x9e, 0xfd, 0x30, 0x42, 0xb7, 0x04, 0xc0, 0x92, 0xf1, 0xdc, + 0xfa, 0x4f, 0x03, 0x6d, 0xd3, 0xe4, 0x3c, 0x19, 0x3e, 0xbd, 0x5d, 0x35, 0x94, 0xf2, 0xe1, 0x17, + 0x49, 0xbe, 0x9b, 0x33, 0xe5, 0xc3, 0x2f, 0x92, 0x22, 0xfa, 0xf2, 0x4a, 0xf4, 0x37, 0x04, 0xb3, + 0xfe, 0xd2, 0x60, 0x2c, 0xb4, 0xde, 0x94, 0xab, 0xaa, 0x68, 0x33, 0x60, 0x2d, 0x31, 0xa8, 0x80, + 0x61, 0xd6, 0xd0, 0x96, 0x20, 0x22, 0x8c, 0x3b, 0x50, 0xb2, 0xfe, 0xd5, 0x40, 0x35, 0x15, 0x86, + 0x08, 0xfa, 0x85, 0xea, 0xc8, 0x3e, 0x59, 0xa9, 0xce, 0xa2, 0xb4, 0xc2, 0x3d, 0xbd, 0xb4, 0x3f, + 0x45, 0xf5, 0x69, 0xa6, 0x6c, 0x10, 0x5f, 0x4c, 0xd3, 0xab, 0xaf, 0x78, 0x4e, 0xd5, 0x16, 0xc2, + 0xcd, 0x69, 0x7a, 0x25, 0xf6, 0xd4, 0xa7, 0x4f, 0x2e, 0x2e, 0x92, 0x69, 0x96, 0x13, 0xf9, 0xd6, + 0xa5, 0x28, 0x83, 0x44, 0x56, 0xac, 0x2f, 0xcb, 0xa8, 0x12, 0xa5, 0xa3, 0x11, 0x7e, 0x9a, 0x8c, + 0xdf, 0x30, 0xdb, 0xdf, 0x43, 0x30, 0xcd, 0xaa, 0x94, 0x0c, 0xe2, 0x44, 0xac, 0x9f, 0xe5, 0x49, + 0xdf, 0x51, 0xb8, 0x54, 0x3b, 0x33, 0xbf, 0x8b, 0x76, 0xd2, 0x4f, 0xe5, 0x4b, 0x51, 0x49, 0x96, + 0xa5, 0xe4, 0xf6, 0x02, 0xce, 0x04, 0xad, 0xff, 0x28, 0xa1, 0xba, 0x72, 0x47, 0x24, 0x5a, 0x9b, + 0x35, 0x22, 0xe2, 0xfb, 0x21, 0x09, 0x31, 0xbc, 0xa5, 0x4d, 0x6e, 0x02, 0xf4, 0xc2, 0xa5, 0x13, + 0x40, 0x40, 0x11, 0xf5, 0x96, 0x46, 0x5e, 0x81, 0x91, 0x2e, 0x87, 0xb5, 0x15, 0x0c, 0x53, 0x0a, + 0x5b, 0x2b, 0x58, 0xbb, 0x1b, 0x01, 0xac, 0xda, 0x3d, 0xb1, 0x7d, 0x38, 0xd0, 0x26, 0x2c, 0x01, + 0x52, 0x37, 0x24, 0x34, 0x80, 0x47, 0xe6, 0xbd, 0x15, 0xb8, 0x61, 0x87, 0xf2, 0x1b, 0xd3, 0x32, + 0x7e, 0x4a, 0xa5, 0xf8, 0x75, 0xe9, 0x05, 0x3c, 0x93, 0x5f, 0x93, 0x1f, 0x9f, 0x0a, 0x3c, 0x60, + 0x2d, 0xb8, 0xde, 0x5a, 0x55, 0x8e, 0x03, 0x72, 0x82, 0xe1, 0xfa, 0x40, 0x7e, 0xc0, 0xd2, 0x8d, + 0x0a, 0xb7, 0xaf, 0x1f, 0x59, 0x8f, 0x51, 0x55, 0x24, 0x70, 0xb1, 0x7f, 0x7e, 0x80, 0x36, 0xf2, + 0x84, 0x1b, 0x72, 0x9e, 0xd8, 0xd5, 0xda, 0x46, 0x25, 0x9a, 0xe6, 0x32, 0x6f, 0x76, 0x4b, 0xfd, + 0x38, 0xeb, 0x9c, 0xac, 0xc5, 0x0b, 0x3b, 0xa5, 0xaf, 0xb6, 0x63, 0xfd, 0x56, 0xec, 0xf3, 0x59, + 0x3a, 0x2a, 0xf6, 0xb9, 0x89, 0xd6, 0xc6, 0xfd, 0xab, 0x24, 0x6f, 0x36, 0xf9, 0xbf, 0x79, 0x82, + 0x20, 0xbf, 0xbb, 0x62, 0xf9, 0x31, 0x6a, 0x98, 0x64, 0xda, 0xdf, 0xf0, 0x4b, 0xd6, 0x4e, 0xae, + 0xa4, 0x99, 0xeb, 0xb0, 0xfe, 0xbb, 0x2c, 0xf6, 0x67, 0x6e, 0x5e, 0x38, 0x7f, 0xd3, 0xc7, 0xb8, + 0xf2, 0x8b, 0x1f, 0xe3, 0xde, 0x45, 0xdb, 0xe7, 0xfd, 0x71, 0x3a, 0x1e, 0x9e, 0xf7, 0x47, 0xb1, + 0xf4, 0x36, 0xfb, 0x1a, 0x57, 0x57, 0xa8, 0x7c, 0x96, 0xed, 0xa3, 0xcd, 0xfe, 0x68, 0xd8, 0x9f, + 0x25, 0xe2, 0xa0, 0x2d, 0x1f, 0x56, 0xe8, 0x82, 0xb4, 0xfe, 0xb7, 0xa4, 0xff, 0xa0, 0xfb, 0x35, + 0xb4, 0x97, 0x17, 0x10, 0xdb, 0x5e, 0x2c, 0x5e, 0x69, 0x4d, 0x3b, 0xf0, 0x7c, 0xf1, 0x80, 0x28, + 0xae, 0x2e, 0xc9, 0x92, 0xbf, 0x65, 0x96, 0xb4, 0x09, 0x5b, 0xa0, 0x0d, 0xdb, 0x6d, 0xfa, 0x76, + 0x8b, 0x2d, 0x3d, 0xe3, 0x04, 0xa3, 0x69, 0x7b, 0x7e, 0xf6, 0x0b, 0xf0, 0x12, 0x28, 0x55, 0xaf, + 0xaf, 0xc0, 0x01, 0x0e, 0x08, 0xed, 0x2d, 0xbd, 0x1d, 0x04, 0x9c, 0xff, 0x1c, 0xb4, 0xf9, 0x02, + 0x1c, 0xda, 0x01, 0x86, 0x2d, 0xed, 0x49, 0x21, 0x60, 0x86, 0xe9, 0x89, 0xe7, 0x2c, 0xbf, 0xe1, + 0x24, 0x4e, 0x9c, 0x8e, 0x7c, 0x68, 0xa2, 0x15, 0x3d, 0xd9, 0xef, 0xd8, 0x4b, 0x6f, 0x86, 0x3c, + 0xa2, 0xb6, 
0x17, 0x72, 0x06, 0xb5, 0x15, 0x86, 0xfc, 0xdd, 0xc1, 0x21, 0x3e, 0xd4, 0x57, 0x18, + 0xea, 0x37, 0x9d, 0x6d, 0x6d, 0x0f, 0xcb, 0xb8, 0xec, 0x33, 0xd8, 0x69, 0x6c, 0x7d, 0xb2, 0x91, + 0x9d, 0x5a, 0xff, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x31, 0x03, 0x4e, 0xbd, 0xfd, 0x1f, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.proto b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto new file mode 100644 index 00000000000..2fcc7953dc0 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto @@ -0,0 +1,460 @@ +syntax = "proto2"; +option go_package = "socket"; + +package appengine; + +message RemoteSocketServiceError { + enum ErrorCode { + SYSTEM_ERROR = 1; + GAI_ERROR = 2; + FAILURE = 4; + PERMISSION_DENIED = 5; + INVALID_REQUEST = 6; + SOCKET_CLOSED = 7; + } + + enum SystemError { + option allow_alias = true; + + SYS_SUCCESS = 0; + SYS_EPERM = 1; + SYS_ENOENT = 2; + SYS_ESRCH = 3; + SYS_EINTR = 4; + SYS_EIO = 5; + SYS_ENXIO = 6; + SYS_E2BIG = 7; + SYS_ENOEXEC = 8; + SYS_EBADF = 9; + SYS_ECHILD = 10; + SYS_EAGAIN = 11; + SYS_EWOULDBLOCK = 11; + SYS_ENOMEM = 12; + SYS_EACCES = 13; + SYS_EFAULT = 14; + SYS_ENOTBLK = 15; + SYS_EBUSY = 16; + SYS_EEXIST = 17; + SYS_EXDEV = 18; + SYS_ENODEV = 19; + SYS_ENOTDIR = 20; + SYS_EISDIR = 21; + SYS_EINVAL = 22; + SYS_ENFILE = 23; + SYS_EMFILE = 24; + SYS_ENOTTY = 25; + SYS_ETXTBSY = 26; + SYS_EFBIG = 27; + SYS_ENOSPC = 28; + SYS_ESPIPE = 29; + SYS_EROFS = 30; + SYS_EMLINK = 31; + SYS_EPIPE = 32; + SYS_EDOM = 33; + SYS_ERANGE = 34; + SYS_EDEADLK = 35; + SYS_EDEADLOCK = 35; + SYS_ENAMETOOLONG = 36; + SYS_ENOLCK = 37; + SYS_ENOSYS = 38; + SYS_ENOTEMPTY = 39; + SYS_ELOOP = 40; + SYS_ENOMSG = 42; + SYS_EIDRM = 43; + SYS_ECHRNG = 44; + SYS_EL2NSYNC = 45; + SYS_EL3HLT = 46; + SYS_EL3RST = 47; + SYS_ELNRNG = 48; + SYS_EUNATCH = 49; + SYS_ENOCSI = 50; + SYS_EL2HLT = 51; + SYS_EBADE = 52; + SYS_EBADR = 53; + SYS_EXFULL = 54; + SYS_ENOANO = 55; + SYS_EBADRQC = 56; + SYS_EBADSLT = 57; + SYS_EBFONT = 59; + SYS_ENOSTR = 60; + SYS_ENODATA = 61; + SYS_ETIME = 62; + SYS_ENOSR = 63; + SYS_ENONET = 64; + SYS_ENOPKG = 65; + SYS_EREMOTE = 66; + SYS_ENOLINK = 67; + SYS_EADV = 68; + SYS_ESRMNT = 69; + SYS_ECOMM = 70; + SYS_EPROTO = 71; + SYS_EMULTIHOP = 72; + SYS_EDOTDOT = 73; + SYS_EBADMSG = 74; + SYS_EOVERFLOW = 75; + SYS_ENOTUNIQ = 76; + SYS_EBADFD = 77; + SYS_EREMCHG = 78; + SYS_ELIBACC = 79; + SYS_ELIBBAD = 80; + SYS_ELIBSCN = 81; + SYS_ELIBMAX = 82; + SYS_ELIBEXEC = 83; + SYS_EILSEQ = 84; + SYS_ERESTART = 85; + SYS_ESTRPIPE = 86; + SYS_EUSERS = 87; + SYS_ENOTSOCK = 88; + SYS_EDESTADDRREQ = 89; + SYS_EMSGSIZE = 90; + SYS_EPROTOTYPE = 91; + SYS_ENOPROTOOPT = 92; + SYS_EPROTONOSUPPORT = 93; + SYS_ESOCKTNOSUPPORT = 94; + SYS_EOPNOTSUPP = 95; + SYS_ENOTSUP = 95; + SYS_EPFNOSUPPORT = 96; + SYS_EAFNOSUPPORT = 97; + SYS_EADDRINUSE = 98; + SYS_EADDRNOTAVAIL = 99; + SYS_ENETDOWN = 100; + SYS_ENETUNREACH = 101; + SYS_ENETRESET = 102; + SYS_ECONNABORTED = 103; + SYS_ECONNRESET = 104; + SYS_ENOBUFS = 105; + SYS_EISCONN = 106; + SYS_ENOTCONN = 107; + SYS_ESHUTDOWN = 108; + SYS_ETOOMANYREFS = 109; + SYS_ETIMEDOUT = 110; + SYS_ECONNREFUSED = 111; + SYS_EHOSTDOWN = 112; + SYS_EHOSTUNREACH = 113; + SYS_EALREADY = 114; + SYS_EINPROGRESS = 115; + SYS_ESTALE = 116; + SYS_EUCLEAN = 117; + SYS_ENOTNAM = 118; + SYS_ENAVAIL = 119; + SYS_EISNAM = 120; + SYS_EREMOTEIO = 121; + SYS_EDQUOT = 122; + SYS_ENOMEDIUM = 123; + SYS_EMEDIUMTYPE = 124; + SYS_ECANCELED = 125; + SYS_ENOKEY = 126; + SYS_EKEYEXPIRED = 127; 
+ SYS_EKEYREVOKED = 128; + SYS_EKEYREJECTED = 129; + SYS_EOWNERDEAD = 130; + SYS_ENOTRECOVERABLE = 131; + SYS_ERFKILL = 132; + } + + optional int32 system_error = 1 [default=0]; + optional string error_detail = 2; +} + +message AddressPort { + required int32 port = 1; + optional bytes packed_address = 2; + + optional string hostname_hint = 3; +} + + + +message CreateSocketRequest { + enum SocketFamily { + IPv4 = 1; + IPv6 = 2; + } + + enum SocketProtocol { + TCP = 1; + UDP = 2; + } + + required SocketFamily family = 1; + required SocketProtocol protocol = 2; + + repeated SocketOption socket_options = 3; + + optional AddressPort proxy_external_ip = 4; + + optional int32 listen_backlog = 5 [default=0]; + + optional AddressPort remote_ip = 6; + + optional string app_id = 9; + + optional int64 project_id = 10; +} + +message CreateSocketReply { + optional string socket_descriptor = 1; + + optional AddressPort server_address = 3; + + optional AddressPort proxy_external_ip = 4; + + extensions 1000 to max; +} + + + +message BindRequest { + required string socket_descriptor = 1; + required AddressPort proxy_external_ip = 2; +} + +message BindReply { + optional AddressPort proxy_external_ip = 1; +} + + + +message GetSocketNameRequest { + required string socket_descriptor = 1; +} + +message GetSocketNameReply { + optional AddressPort proxy_external_ip = 2; +} + + + +message GetPeerNameRequest { + required string socket_descriptor = 1; +} + +message GetPeerNameReply { + optional AddressPort peer_ip = 2; +} + + +message SocketOption { + + enum SocketOptionLevel { + SOCKET_SOL_IP = 0; + SOCKET_SOL_SOCKET = 1; + SOCKET_SOL_TCP = 6; + SOCKET_SOL_UDP = 17; + } + + enum SocketOptionName { + option allow_alias = true; + + SOCKET_SO_DEBUG = 1; + SOCKET_SO_REUSEADDR = 2; + SOCKET_SO_TYPE = 3; + SOCKET_SO_ERROR = 4; + SOCKET_SO_DONTROUTE = 5; + SOCKET_SO_BROADCAST = 6; + SOCKET_SO_SNDBUF = 7; + SOCKET_SO_RCVBUF = 8; + SOCKET_SO_KEEPALIVE = 9; + SOCKET_SO_OOBINLINE = 10; + SOCKET_SO_LINGER = 13; + SOCKET_SO_RCVTIMEO = 20; + SOCKET_SO_SNDTIMEO = 21; + + SOCKET_IP_TOS = 1; + SOCKET_IP_TTL = 2; + SOCKET_IP_HDRINCL = 3; + SOCKET_IP_OPTIONS = 4; + + SOCKET_TCP_NODELAY = 1; + SOCKET_TCP_MAXSEG = 2; + SOCKET_TCP_CORK = 3; + SOCKET_TCP_KEEPIDLE = 4; + SOCKET_TCP_KEEPINTVL = 5; + SOCKET_TCP_KEEPCNT = 6; + SOCKET_TCP_SYNCNT = 7; + SOCKET_TCP_LINGER2 = 8; + SOCKET_TCP_DEFER_ACCEPT = 9; + SOCKET_TCP_WINDOW_CLAMP = 10; + SOCKET_TCP_INFO = 11; + SOCKET_TCP_QUICKACK = 12; + } + + required SocketOptionLevel level = 1; + required SocketOptionName option = 2; + required bytes value = 3; +} + + +message SetSocketOptionsRequest { + required string socket_descriptor = 1; + repeated SocketOption options = 2; +} + +message SetSocketOptionsReply { +} + +message GetSocketOptionsRequest { + required string socket_descriptor = 1; + repeated SocketOption options = 2; +} + +message GetSocketOptionsReply { + repeated SocketOption options = 2; +} + + +message ConnectRequest { + required string socket_descriptor = 1; + required AddressPort remote_ip = 2; + optional double timeout_seconds = 3 [default=-1]; +} + +message ConnectReply { + optional AddressPort proxy_external_ip = 1; + + extensions 1000 to max; +} + + +message ListenRequest { + required string socket_descriptor = 1; + required int32 backlog = 2; +} + +message ListenReply { +} + + +message AcceptRequest { + required string socket_descriptor = 1; + optional double timeout_seconds = 2 [default=-1]; +} + +message AcceptReply { + optional bytes new_socket_descriptor = 2; + optional 
AddressPort remote_address = 3; +} + + + +message ShutDownRequest { + enum How { + SOCKET_SHUT_RD = 1; + SOCKET_SHUT_WR = 2; + SOCKET_SHUT_RDWR = 3; + } + required string socket_descriptor = 1; + required How how = 2; + required int64 send_offset = 3; +} + +message ShutDownReply { +} + + + +message CloseRequest { + required string socket_descriptor = 1; + optional int64 send_offset = 2 [default=-1]; +} + +message CloseReply { +} + + + +message SendRequest { + required string socket_descriptor = 1; + required bytes data = 2 [ctype=CORD]; + required int64 stream_offset = 3; + optional int32 flags = 4 [default=0]; + optional AddressPort send_to = 5; + optional double timeout_seconds = 6 [default=-1]; +} + +message SendReply { + optional int32 data_sent = 1; +} + + +message ReceiveRequest { + enum Flags { + MSG_OOB = 1; + MSG_PEEK = 2; + } + required string socket_descriptor = 1; + required int32 data_size = 2; + optional int32 flags = 3 [default=0]; + optional double timeout_seconds = 5 [default=-1]; +} + +message ReceiveReply { + optional int64 stream_offset = 2; + optional bytes data = 3 [ctype=CORD]; + optional AddressPort received_from = 4; + optional int32 buffer_size = 5; +} + + + +message PollEvent { + + enum PollEventFlag { + SOCKET_POLLNONE = 0; + SOCKET_POLLIN = 1; + SOCKET_POLLPRI = 2; + SOCKET_POLLOUT = 4; + SOCKET_POLLERR = 8; + SOCKET_POLLHUP = 16; + SOCKET_POLLNVAL = 32; + SOCKET_POLLRDNORM = 64; + SOCKET_POLLRDBAND = 128; + SOCKET_POLLWRNORM = 256; + SOCKET_POLLWRBAND = 512; + SOCKET_POLLMSG = 1024; + SOCKET_POLLREMOVE = 4096; + SOCKET_POLLRDHUP = 8192; + }; + + required string socket_descriptor = 1; + required int32 requested_events = 2; + required int32 observed_events = 3; +} + +message PollRequest { + repeated PollEvent events = 1; + optional double timeout_seconds = 2 [default=-1]; +} + +message PollReply { + repeated PollEvent events = 2; +} + +message ResolveRequest { + required string name = 1; + repeated CreateSocketRequest.SocketFamily address_families = 2; +} + +message ResolveReply { + enum ErrorCode { + SOCKET_EAI_ADDRFAMILY = 1; + SOCKET_EAI_AGAIN = 2; + SOCKET_EAI_BADFLAGS = 3; + SOCKET_EAI_FAIL = 4; + SOCKET_EAI_FAMILY = 5; + SOCKET_EAI_MEMORY = 6; + SOCKET_EAI_NODATA = 7; + SOCKET_EAI_NONAME = 8; + SOCKET_EAI_SERVICE = 9; + SOCKET_EAI_SOCKTYPE = 10; + SOCKET_EAI_SYSTEM = 11; + SOCKET_EAI_BADHINTS = 12; + SOCKET_EAI_PROTOCOL = 13; + SOCKET_EAI_OVERFLOW = 14; + SOCKET_EAI_MAX = 15; + }; + + repeated bytes packed_address = 2; + optional string canonical_name = 3; + repeated string aliases = 4; +} diff --git a/vendor/google.golang.org/appengine/socket/doc.go b/vendor/google.golang.org/appengine/socket/doc.go new file mode 100644 index 00000000000..3de46df826b --- /dev/null +++ b/vendor/google.golang.org/appengine/socket/doc.go @@ -0,0 +1,10 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package socket provides outbound network sockets. +// +// This package is only required in the classic App Engine environment. +// Applications running only in App Engine "flexible environment" should +// use the standard library's net package. 
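+//
+// A minimal usage sketch (illustrative only, assuming ctx is the context of
+// an inbound App Engine request and "example.com:80" is a reachable host):
+//
+//	conn, err := socket.Dial(ctx, "tcp", "example.com:80")
+//	if err != nil {
+//		// handle the dial error
+//	}
+//	defer conn.Close()
+//
+// Conn implements net.Conn, so the returned connection can be used wherever
+// a standard library connection is expected.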
+package socket diff --git a/vendor/google.golang.org/appengine/socket/socket_classic.go b/vendor/google.golang.org/appengine/socket/socket_classic.go new file mode 100644 index 00000000000..0ad50e2d36d --- /dev/null +++ b/vendor/google.golang.org/appengine/socket/socket_classic.go @@ -0,0 +1,290 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appengine + +package socket + +import ( + "fmt" + "io" + "net" + "strconv" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/appengine/internal" + + pb "google.golang.org/appengine/internal/socket" +) + +// Dial connects to the address addr on the network protocol. +// The address format is host:port, where host may be a hostname or an IP address. +// Known protocols are "tcp" and "udp". +// The returned connection satisfies net.Conn, and is valid while ctx is valid; +// if the connection is to be used after ctx becomes invalid, invoke SetContext +// with the new context. +func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { + return DialTimeout(ctx, protocol, addr, 0) +} + +var ipFamilies = []pb.CreateSocketRequest_SocketFamily{ + pb.CreateSocketRequest_IPv4, + pb.CreateSocketRequest_IPv6, +} + +// DialTimeout is like Dial but takes a timeout. +// The timeout includes name resolution, if required. +func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { + dialCtx := ctx // Used for dialing and name resolution, but not stored in the *Conn. + if timeout > 0 { + var cancel context.CancelFunc + dialCtx, cancel = context.WithTimeout(ctx, timeout) + defer cancel() + } + + host, portStr, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + port, err := strconv.Atoi(portStr) + if err != nil { + return nil, fmt.Errorf("socket: bad port %q: %v", portStr, err) + } + + var prot pb.CreateSocketRequest_SocketProtocol + switch protocol { + case "tcp": + prot = pb.CreateSocketRequest_TCP + case "udp": + prot = pb.CreateSocketRequest_UDP + default: + return nil, fmt.Errorf("socket: unknown protocol %q", protocol) + } + + packedAddrs, resolved, err := resolve(dialCtx, ipFamilies, host) + if err != nil { + return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) + } + if len(packedAddrs) == 0 { + return nil, fmt.Errorf("no addresses for %q", host) + } + + packedAddr := packedAddrs[0] // use first address + fam := pb.CreateSocketRequest_IPv4 + if len(packedAddr) == net.IPv6len { + fam = pb.CreateSocketRequest_IPv6 + } + + req := &pb.CreateSocketRequest{ + Family: fam.Enum(), + Protocol: prot.Enum(), + RemoteIp: &pb.AddressPort{ + Port: proto.Int32(int32(port)), + PackedAddress: packedAddr, + }, + } + if resolved { + req.RemoteIp.HostnameHint = &host + } + res := &pb.CreateSocketReply{} + if err := internal.Call(dialCtx, "remote_socket", "CreateSocket", req, res); err != nil { + return nil, err + } + + return &Conn{ + ctx: ctx, + desc: res.GetSocketDescriptor(), + prot: prot, + local: res.ProxyExternalIp, + remote: req.RemoteIp, + }, nil +} + +// LookupIP returns the given host's IP addresses. 
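+// For illustration, a sketch resolving a placeholder host:
+//
+//	addrs, err := socket.LookupIP(ctx, "example.com")
+//	if err != nil {
+//		// handle error
+//	}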
+func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { + packedAddrs, _, err := resolve(ctx, ipFamilies, host) + if err != nil { + return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) + } + addrs = make([]net.IP, len(packedAddrs)) + for i, pa := range packedAddrs { + addrs[i] = net.IP(pa) + } + return addrs, nil +} + +func resolve(ctx context.Context, fams []pb.CreateSocketRequest_SocketFamily, host string) ([][]byte, bool, error) { + // Check if it's an IP address. + if ip := net.ParseIP(host); ip != nil { + if ip := ip.To4(); ip != nil { + return [][]byte{ip}, false, nil + } + return [][]byte{ip}, false, nil + } + + req := &pb.ResolveRequest{ + Name: &host, + AddressFamilies: fams, + } + res := &pb.ResolveReply{} + if err := internal.Call(ctx, "remote_socket", "Resolve", req, res); err != nil { + // XXX: need to map to pb.ResolveReply_ErrorCode? + return nil, false, err + } + return res.PackedAddress, true, nil +} + +// withDeadline is like context.WithDeadline, except it ignores the zero deadline. +func withDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) { + if deadline.IsZero() { + return parent, func() {} + } + return context.WithDeadline(parent, deadline) +} + +// Conn represents a socket connection. +// It implements net.Conn. +type Conn struct { + ctx context.Context + desc string + offset int64 + + prot pb.CreateSocketRequest_SocketProtocol + local, remote *pb.AddressPort + + readDeadline, writeDeadline time.Time // optional +} + +// SetContext sets the context that is used by this Conn. +// It is usually used only when using a Conn that was created in a different context, +// such as when a connection is created during a warmup request but used while +// servicing a user request. 
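+//
+// A minimal sketch (warmupCtx and requestCtx are hypothetical contexts):
+//
+//	conn, err := socket.Dial(warmupCtx, "tcp", "example.com:443")
+//	if err != nil {
+//		// handle error
+//	}
+//	conn.SetContext(requestCtx) // re-home the connection before use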
+func (cn *Conn) SetContext(ctx context.Context) { + cn.ctx = ctx +} + +func (cn *Conn) Read(b []byte) (n int, err error) { + const maxRead = 1 << 20 + if len(b) > maxRead { + b = b[:maxRead] + } + + req := &pb.ReceiveRequest{ + SocketDescriptor: &cn.desc, + DataSize: proto.Int32(int32(len(b))), + } + res := &pb.ReceiveReply{} + if !cn.readDeadline.IsZero() { + req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds()) + } + ctx, cancel := withDeadline(cn.ctx, cn.readDeadline) + defer cancel() + if err := internal.Call(ctx, "remote_socket", "Receive", req, res); err != nil { + return 0, err + } + if len(res.Data) == 0 { + return 0, io.EOF + } + if len(res.Data) > len(b) { + return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b)) + } + return copy(b, res.Data), nil +} + +func (cn *Conn) Write(b []byte) (n int, err error) { + const lim = 1 << 20 // max per chunk + + for n < len(b) { + chunk := b[n:] + if len(chunk) > lim { + chunk = chunk[:lim] + } + + req := &pb.SendRequest{ + SocketDescriptor: &cn.desc, + Data: chunk, + StreamOffset: &cn.offset, + } + res := &pb.SendReply{} + if !cn.writeDeadline.IsZero() { + req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds()) + } + ctx, cancel := withDeadline(cn.ctx, cn.writeDeadline) + defer cancel() + if err = internal.Call(ctx, "remote_socket", "Send", req, res); err != nil { + // assume zero bytes were sent in this RPC + break + } + n += int(res.GetDataSent()) + cn.offset += int64(res.GetDataSent()) + } + + return +} + +func (cn *Conn) Close() error { + req := &pb.CloseRequest{ + SocketDescriptor: &cn.desc, + } + res := &pb.CloseReply{} + if err := internal.Call(cn.ctx, "remote_socket", "Close", req, res); err != nil { + return err + } + cn.desc = "CLOSED" + return nil +} + +func addr(prot pb.CreateSocketRequest_SocketProtocol, ap *pb.AddressPort) net.Addr { + if ap == nil { + return nil + } + switch prot { + case pb.CreateSocketRequest_TCP: + return &net.TCPAddr{ + IP: net.IP(ap.PackedAddress), + Port: int(*ap.Port), + } + case pb.CreateSocketRequest_UDP: + return &net.UDPAddr{ + IP: net.IP(ap.PackedAddress), + Port: int(*ap.Port), + } + } + panic("unknown protocol " + prot.String()) +} + +func (cn *Conn) LocalAddr() net.Addr { return addr(cn.prot, cn.local) } +func (cn *Conn) RemoteAddr() net.Addr { return addr(cn.prot, cn.remote) } + +func (cn *Conn) SetDeadline(t time.Time) error { + cn.readDeadline = t + cn.writeDeadline = t + return nil +} + +func (cn *Conn) SetReadDeadline(t time.Time) error { + cn.readDeadline = t + return nil +} + +func (cn *Conn) SetWriteDeadline(t time.Time) error { + cn.writeDeadline = t + return nil +} + +// KeepAlive signals that the connection is still in use. +// It may be called to prevent the socket being closed due to inactivity. +func (cn *Conn) KeepAlive() error { + req := &pb.GetSocketNameRequest{ + SocketDescriptor: &cn.desc, + } + res := &pb.GetSocketNameReply{} + return internal.Call(cn.ctx, "remote_socket", "GetSocketName", req, res) +} + +func init() { + internal.RegisterErrorCodeMap("remote_socket", pb.RemoteSocketServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/socket/socket_vm.go b/vendor/google.golang.org/appengine/socket/socket_vm.go new file mode 100644 index 00000000000..c804169a1c0 --- /dev/null +++ b/vendor/google.golang.org/appengine/socket/socket_vm.go @@ -0,0 +1,64 @@ +// Copyright 2015 Google Inc. All rights reserved. 
+// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package socket + +import ( + "net" + "time" + + "golang.org/x/net/context" +) + +// Dial connects to the address addr on the network protocol. +// The address format is host:port, where host may be a hostname or an IP address. +// Known protocols are "tcp" and "udp". +// The returned connection satisfies net.Conn, and is valid while ctx is valid; +// if the connection is to be used after ctx becomes invalid, invoke SetContext +// with the new context. +func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { + conn, err := net.Dial(protocol, addr) + if err != nil { + return nil, err + } + return &Conn{conn}, nil +} + +// DialTimeout is like Dial but takes a timeout. +// The timeout includes name resolution, if required. +func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { + conn, err := net.DialTimeout(protocol, addr, timeout) + if err != nil { + return nil, err + } + return &Conn{conn}, nil +} + +// LookupIP returns the given host's IP addresses. +func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { + return net.LookupIP(host) +} + +// Conn represents a socket connection. +// It implements net.Conn. +type Conn struct { + net.Conn +} + +// SetContext sets the context that is used by this Conn. +// It is usually used only when using a Conn that was created in a different context, +// such as when a connection is created during a warmup request but used while +// servicing a user request. +func (cn *Conn) SetContext(ctx context.Context) { + // This function is not required in App Engine "flexible environment". +} + +// KeepAlive signals that the connection is still in use. +// It may be called to prevent the socket being closed due to inactivity. +func (cn *Conn) KeepAlive() error { + // This function is not required in App Engine "flexible environment". + return nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go new file mode 100644 index 00000000000..191bea48c86 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go @@ -0,0 +1,119 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.2 +// source: google/api/annotations.proto + +package annotations + +import ( + reflect "reflect" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
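+ // (For orientation: EnforceVersion is a compile-time assertion. The
+ // untyped constant 20 - protoimpl.MinVersion is only a valid argument
+ // while the linked protobuf runtime's MinVersion is <= 20, so an
+ // incompatible runtime fails the build rather than misbehaving.)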
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_google_api_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*HttpRule)(nil), + Field: 72295728, + Name: "google.api.http", + Tag: "bytes,72295728,opt,name=http", + Filename: "google/api/annotations.proto", + }, +} + +// Extension fields to descriptorpb.MethodOptions. +var ( + // See `HttpRule`. + // + // optional google.api.HttpRule http = 72295728; + E_Http = &file_google_api_annotations_proto_extTypes[0] +) + +var File_google_api_annotations_proto protoreflect.FileDescriptor + +var file_google_api_annotations_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x3a, 0x4b, 0x0a, 0x04, 0x68, 0x74, 0x74, 0x70, 0x12, 0x1e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb0, 0xca, 0xbc, 0x22, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, + 0x42, 0x6e, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x42, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_google_api_annotations_proto_goTypes = []interface{}{ + (*descriptorpb.MethodOptions)(nil), // 0: google.protobuf.MethodOptions + (*HttpRule)(nil), // 1: google.api.HttpRule +} +var file_google_api_annotations_proto_depIdxs = []int32{ + 0, // 0: google.api.http:extendee -> google.protobuf.MethodOptions + 1, // 1: google.api.http:type_name -> google.api.HttpRule + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_api_annotations_proto_init() } +func file_google_api_annotations_proto_init() { + if File_google_api_annotations_proto != nil { + return + } + file_google_api_http_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_annotations_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_annotations_proto_goTypes, + DependencyIndexes: file_google_api_annotations_proto_depIdxs, + ExtensionInfos: file_google_api_annotations_proto_extTypes, + }.Build() + File_google_api_annotations_proto = out.File + file_google_api_annotations_proto_rawDesc = nil + file_google_api_annotations_proto_goTypes = nil + file_google_api_annotations_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go new file mode 100644 index 00000000000..ec7c602ecf9 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -0,0 +1,1652 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.18.1 +// source: google/api/client.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + api "google.golang.org/genproto/googleapis/api" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + durationpb "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The organization for which the client libraries are being published. +// Affects the url where generated docs are published, etc. +type ClientLibraryOrganization int32 + +const ( + // Not useful. + ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED ClientLibraryOrganization = 0 + // Google Cloud Platform Org. + ClientLibraryOrganization_CLOUD ClientLibraryOrganization = 1 + // Ads (Advertising) Org. + ClientLibraryOrganization_ADS ClientLibraryOrganization = 2 + // Photos Org. + ClientLibraryOrganization_PHOTOS ClientLibraryOrganization = 3 + // Street View Org. + ClientLibraryOrganization_STREET_VIEW ClientLibraryOrganization = 4 +) + +// Enum value maps for ClientLibraryOrganization. 
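+// (Illustrative note: these generated tables map enum numbers to names and
+// back; e.g. ClientLibraryOrganization_name[1] == "CLOUD" and
+// ClientLibraryOrganization_value["ADS"] == 2.)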
+var ( + ClientLibraryOrganization_name = map[int32]string{ + 0: "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED", + 1: "CLOUD", + 2: "ADS", + 3: "PHOTOS", + 4: "STREET_VIEW", + } + ClientLibraryOrganization_value = map[string]int32{ + "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED": 0, + "CLOUD": 1, + "ADS": 2, + "PHOTOS": 3, + "STREET_VIEW": 4, + } +) + +func (x ClientLibraryOrganization) Enum() *ClientLibraryOrganization { + p := new(ClientLibraryOrganization) + *p = x + return p +} + +func (x ClientLibraryOrganization) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClientLibraryOrganization) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_client_proto_enumTypes[0].Descriptor() +} + +func (ClientLibraryOrganization) Type() protoreflect.EnumType { + return &file_google_api_client_proto_enumTypes[0] +} + +func (x ClientLibraryOrganization) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClientLibraryOrganization.Descriptor instead. +func (ClientLibraryOrganization) EnumDescriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{0} +} + +// To where should client libraries be published? +type ClientLibraryDestination int32 + +const ( + // Client libraries will neither be generated nor published to package + // managers. + ClientLibraryDestination_CLIENT_LIBRARY_DESTINATION_UNSPECIFIED ClientLibraryDestination = 0 + // Generate the client library in a repo under github.com/googleapis, + // but don't publish it to package managers. + ClientLibraryDestination_GITHUB ClientLibraryDestination = 10 + // Publish the library to package managers like nuget.org and npmjs.com. + ClientLibraryDestination_PACKAGE_MANAGER ClientLibraryDestination = 20 +) + +// Enum value maps for ClientLibraryDestination. +var ( + ClientLibraryDestination_name = map[int32]string{ + 0: "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED", + 10: "GITHUB", + 20: "PACKAGE_MANAGER", + } + ClientLibraryDestination_value = map[string]int32{ + "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED": 0, + "GITHUB": 10, + "PACKAGE_MANAGER": 20, + } +) + +func (x ClientLibraryDestination) Enum() *ClientLibraryDestination { + p := new(ClientLibraryDestination) + *p = x + return p +} + +func (x ClientLibraryDestination) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClientLibraryDestination) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_client_proto_enumTypes[1].Descriptor() +} + +func (ClientLibraryDestination) Type() protoreflect.EnumType { + return &file_google_api_client_proto_enumTypes[1] +} + +func (x ClientLibraryDestination) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClientLibraryDestination.Descriptor instead. +func (ClientLibraryDestination) EnumDescriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{1} +} + +// Required information for every language. +type CommonLanguageSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Link to automatically generated reference documentation. 
Example: + // https://cloud.google.com/nodejs/docs/reference/asset/latest + ReferenceDocsUri string `protobuf:"bytes,1,opt,name=reference_docs_uri,json=referenceDocsUri,proto3" json:"reference_docs_uri,omitempty"` + // The destination where API teams want this client library to be published. + Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"` +} + +func (x *CommonLanguageSettings) Reset() { + *x = CommonLanguageSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommonLanguageSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommonLanguageSettings) ProtoMessage() {} + +func (x *CommonLanguageSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommonLanguageSettings.ProtoReflect.Descriptor instead. +func (*CommonLanguageSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{0} +} + +func (x *CommonLanguageSettings) GetReferenceDocsUri() string { + if x != nil { + return x.ReferenceDocsUri + } + return "" +} + +func (x *CommonLanguageSettings) GetDestinations() []ClientLibraryDestination { + if x != nil { + return x.Destinations + } + return nil +} + +// Details about how and where to publish client libraries. +type ClientLibrarySettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Version of the API to apply these settings to. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Launch stage of this version of the API. + LaunchStage api.LaunchStage `protobuf:"varint,2,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` + // When using transport=rest, the client request will encode enums as + // numbers rather than strings. + RestNumericEnums bool `protobuf:"varint,3,opt,name=rest_numeric_enums,json=restNumericEnums,proto3" json:"rest_numeric_enums,omitempty"` + // Settings for legacy Java features, supported in the Service YAML. + JavaSettings *JavaSettings `protobuf:"bytes,21,opt,name=java_settings,json=javaSettings,proto3" json:"java_settings,omitempty"` + // Settings for C++ client libraries. + CppSettings *CppSettings `protobuf:"bytes,22,opt,name=cpp_settings,json=cppSettings,proto3" json:"cpp_settings,omitempty"` + // Settings for PHP client libraries. + PhpSettings *PhpSettings `protobuf:"bytes,23,opt,name=php_settings,json=phpSettings,proto3" json:"php_settings,omitempty"` + // Settings for Python client libraries. + PythonSettings *PythonSettings `protobuf:"bytes,24,opt,name=python_settings,json=pythonSettings,proto3" json:"python_settings,omitempty"` + // Settings for Node client libraries. + NodeSettings *NodeSettings `protobuf:"bytes,25,opt,name=node_settings,json=nodeSettings,proto3" json:"node_settings,omitempty"` + // Settings for .NET client libraries. 
+ DotnetSettings *DotnetSettings `protobuf:"bytes,26,opt,name=dotnet_settings,json=dotnetSettings,proto3" json:"dotnet_settings,omitempty"` + // Settings for Ruby client libraries. + RubySettings *RubySettings `protobuf:"bytes,27,opt,name=ruby_settings,json=rubySettings,proto3" json:"ruby_settings,omitempty"` + // Settings for Go client libraries. + GoSettings *GoSettings `protobuf:"bytes,28,opt,name=go_settings,json=goSettings,proto3" json:"go_settings,omitempty"` +} + +func (x *ClientLibrarySettings) Reset() { + *x = ClientLibrarySettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientLibrarySettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientLibrarySettings) ProtoMessage() {} + +func (x *ClientLibrarySettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientLibrarySettings.ProtoReflect.Descriptor instead. +func (*ClientLibrarySettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientLibrarySettings) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *ClientLibrarySettings) GetLaunchStage() api.LaunchStage { + if x != nil { + return x.LaunchStage + } + return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED +} + +func (x *ClientLibrarySettings) GetRestNumericEnums() bool { + if x != nil { + return x.RestNumericEnums + } + return false +} + +func (x *ClientLibrarySettings) GetJavaSettings() *JavaSettings { + if x != nil { + return x.JavaSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetCppSettings() *CppSettings { + if x != nil { + return x.CppSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetPhpSettings() *PhpSettings { + if x != nil { + return x.PhpSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetPythonSettings() *PythonSettings { + if x != nil { + return x.PythonSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetNodeSettings() *NodeSettings { + if x != nil { + return x.NodeSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetDotnetSettings() *DotnetSettings { + if x != nil { + return x.DotnetSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetRubySettings() *RubySettings { + if x != nil { + return x.RubySettings + } + return nil +} + +func (x *ClientLibrarySettings) GetGoSettings() *GoSettings { + if x != nil { + return x.GoSettings + } + return nil +} + +// This message configures the settings for publishing [Google Cloud Client +// libraries](https://cloud.google.com/apis/docs/cloud-client-libraries) +// generated from the service config. +type Publishing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of API method settings, e.g. the behavior for methods that use the + // long-running operation pattern. + MethodSettings []*MethodSettings `protobuf:"bytes,2,rep,name=method_settings,json=methodSettings,proto3" json:"method_settings,omitempty"` + // Link to a place that API users can report issues. 
Example: + // https://issuetracker.google.com/issues/new?component=190865&template=1161103 + NewIssueUri string `protobuf:"bytes,101,opt,name=new_issue_uri,json=newIssueUri,proto3" json:"new_issue_uri,omitempty"` + // Link to product home page. Example: + // https://cloud.google.com/asset-inventory/docs/overview + DocumentationUri string `protobuf:"bytes,102,opt,name=documentation_uri,json=documentationUri,proto3" json:"documentation_uri,omitempty"` + // Used as a tracking tag when collecting data about the APIs developer + // relations artifacts like docs, packages delivered to package managers, + // etc. Example: "speech". + ApiShortName string `protobuf:"bytes,103,opt,name=api_short_name,json=apiShortName,proto3" json:"api_short_name,omitempty"` + // GitHub label to apply to issues and pull requests opened for this API. + GithubLabel string `protobuf:"bytes,104,opt,name=github_label,json=githubLabel,proto3" json:"github_label,omitempty"` + // GitHub teams to be added to CODEOWNERS in the directory in GitHub + // containing source code for the client libraries for this API. + CodeownerGithubTeams []string `protobuf:"bytes,105,rep,name=codeowner_github_teams,json=codeownerGithubTeams,proto3" json:"codeowner_github_teams,omitempty"` + // A prefix used in sample code when demarking regions to be included in + // documentation. + DocTagPrefix string `protobuf:"bytes,106,opt,name=doc_tag_prefix,json=docTagPrefix,proto3" json:"doc_tag_prefix,omitempty"` + // For whom the client library is being published. + Organization ClientLibraryOrganization `protobuf:"varint,107,opt,name=organization,proto3,enum=google.api.ClientLibraryOrganization" json:"organization,omitempty"` + // Client library settings. If the same version string appears multiple + // times in this list, then the last one wins. Settings from earlier + // settings with the same version string are discarded. + LibrarySettings []*ClientLibrarySettings `protobuf:"bytes,109,rep,name=library_settings,json=librarySettings,proto3" json:"library_settings,omitempty"` +} + +func (x *Publishing) Reset() { + *x = Publishing{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Publishing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Publishing) ProtoMessage() {} + +func (x *Publishing) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Publishing.ProtoReflect.Descriptor instead. 
+func (*Publishing) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{2} +} + +func (x *Publishing) GetMethodSettings() []*MethodSettings { + if x != nil { + return x.MethodSettings + } + return nil +} + +func (x *Publishing) GetNewIssueUri() string { + if x != nil { + return x.NewIssueUri + } + return "" +} + +func (x *Publishing) GetDocumentationUri() string { + if x != nil { + return x.DocumentationUri + } + return "" +} + +func (x *Publishing) GetApiShortName() string { + if x != nil { + return x.ApiShortName + } + return "" +} + +func (x *Publishing) GetGithubLabel() string { + if x != nil { + return x.GithubLabel + } + return "" +} + +func (x *Publishing) GetCodeownerGithubTeams() []string { + if x != nil { + return x.CodeownerGithubTeams + } + return nil +} + +func (x *Publishing) GetDocTagPrefix() string { + if x != nil { + return x.DocTagPrefix + } + return "" +} + +func (x *Publishing) GetOrganization() ClientLibraryOrganization { + if x != nil { + return x.Organization + } + return ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED +} + +func (x *Publishing) GetLibrarySettings() []*ClientLibrarySettings { + if x != nil { + return x.LibrarySettings + } + return nil +} + +// Settings for Java client libraries. +type JavaSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The package name to use in Java. Clobbers the java_package option + // set in the protobuf. This should be used **only** by APIs + // that have already set the "language_settings.java.package_name" field + // in gapic.yaml. API teams should use the protobuf java_package option + // where possible. + // + // Example of a YAML configuration:: + // + // publishing: + // java_settings: + // library_package: com.google.cloud.pubsub.v1 + LibraryPackage string `protobuf:"bytes,1,opt,name=library_package,json=libraryPackage,proto3" json:"library_package,omitempty"` + // Configure the Java class name to use instead of the service's for its + // corresponding generated GAPIC client. Keys are fully-qualified + // service names as they appear in the protobuf (including the full + // package name). This should be used **only** by APIs that have already + // set the "language_settings.java.interface_names" field in gapic.yaml. + // API teams should otherwise use the service name as it appears in the + // protobuf. + // + // Example of a YAML configuration:: + // + // publishing: + // java_settings: + // service_class_names: + // - google.pubsub.v1.Publisher: TopicAdmin + // - google.pubsub.v1.Subscriber: SubscriptionAdmin + ServiceClassNames map[string]string `protobuf:"bytes,2,rep,name=service_class_names,json=serviceClassNames,proto3" json:"service_class_names,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Some settings.
+ Common *CommonLanguageSettings `protobuf:"bytes,3,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *JavaSettings) Reset() { + *x = JavaSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JavaSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JavaSettings) ProtoMessage() {} + +func (x *JavaSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JavaSettings.ProtoReflect.Descriptor instead. +func (*JavaSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{3} +} + +func (x *JavaSettings) GetLibraryPackage() string { + if x != nil { + return x.LibraryPackage + } + return "" +} + +func (x *JavaSettings) GetServiceClassNames() map[string]string { + if x != nil { + return x.ServiceClassNames + } + return nil +} + +func (x *JavaSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for C++ client libraries. +type CppSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *CppSettings) Reset() { + *x = CppSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CppSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CppSettings) ProtoMessage() {} + +func (x *CppSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CppSettings.ProtoReflect.Descriptor instead. +func (*CppSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{4} +} + +func (x *CppSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Php client libraries. +type PhpSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. 
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *PhpSettings) Reset() { + *x = PhpSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PhpSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PhpSettings) ProtoMessage() {} + +func (x *PhpSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PhpSettings.ProtoReflect.Descriptor instead. +func (*PhpSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{5} +} + +func (x *PhpSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Python client libraries. +type PythonSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *PythonSettings) Reset() { + *x = PythonSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PythonSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PythonSettings) ProtoMessage() {} + +func (x *PythonSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PythonSettings.ProtoReflect.Descriptor instead. +func (*PythonSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{6} +} + +func (x *PythonSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Node client libraries. +type NodeSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *NodeSettings) Reset() { + *x = NodeSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NodeSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NodeSettings) ProtoMessage() {} + +func (x *NodeSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NodeSettings.ProtoReflect.Descriptor instead. 
+func (*NodeSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{7} +} + +func (x *NodeSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Dotnet client libraries. +type DotnetSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *DotnetSettings) Reset() { + *x = DotnetSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DotnetSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DotnetSettings) ProtoMessage() {} + +func (x *DotnetSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DotnetSettings.ProtoReflect.Descriptor instead. +func (*DotnetSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{8} +} + +func (x *DotnetSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Ruby client libraries. +type RubySettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *RubySettings) Reset() { + *x = RubySettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RubySettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RubySettings) ProtoMessage() {} + +func (x *RubySettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RubySettings.ProtoReflect.Descriptor instead. +func (*RubySettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{9} +} + +func (x *RubySettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Go client libraries. +type GoSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. 
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *GoSettings) Reset() { + *x = GoSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GoSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GoSettings) ProtoMessage() {} + +func (x *GoSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GoSettings.ProtoReflect.Descriptor instead. +func (*GoSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{10} +} + +func (x *GoSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Describes the generator configuration for a method. +type MethodSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The fully qualified name of the method, for which the options below apply. + // This is used to find the method to apply the options. + Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` + // Describes settings to use for long-running operations when generating + // API methods for RPCs. Complements RPCs that use the annotations in + // google/longrunning/operations.proto. + // + // Example of a YAML configuration:: + // + // publishing: + // method_behavior: + // - selector: CreateAdDomain + // long_running: + // initial_poll_delay: + // seconds: 60 # 1 minute + // poll_delay_multiplier: 1.5 + // max_poll_delay: + // seconds: 360 # 6 minutes + // total_poll_timeout: + // seconds: 54000 # 90 minutes + LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"` +} + +func (x *MethodSettings) Reset() { + *x = MethodSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodSettings) ProtoMessage() {} + +func (x *MethodSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodSettings.ProtoReflect.Descriptor instead. +func (*MethodSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{11} +} + +func (x *MethodSettings) GetSelector() string { + if x != nil { + return x.Selector + } + return "" +} + +func (x *MethodSettings) GetLongRunning() *MethodSettings_LongRunning { + if x != nil { + return x.LongRunning + } + return nil +} + +// Describes settings to use when generating API methods that use the +// long-running operation pattern. +// All default values below are from those used in the client library +// generators (e.g. 
+// [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)). +type MethodSettings_LongRunning struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Initial delay after which the first poll request will be made. + // Default value: 5 seconds. + InitialPollDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=initial_poll_delay,json=initialPollDelay,proto3" json:"initial_poll_delay,omitempty"` + // Multiplier to gradually increase delay between subsequent polls until it + // reaches max_poll_delay. + // Default value: 1.5. + PollDelayMultiplier float32 `protobuf:"fixed32,2,opt,name=poll_delay_multiplier,json=pollDelayMultiplier,proto3" json:"poll_delay_multiplier,omitempty"` + // Maximum time between two subsequent poll requests. + // Default value: 45 seconds. + MaxPollDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=max_poll_delay,json=maxPollDelay,proto3" json:"max_poll_delay,omitempty"` + // Total polling timeout. + // Default value: 5 minutes. + TotalPollTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=total_poll_timeout,json=totalPollTimeout,proto3" json:"total_poll_timeout,omitempty"` +} + +func (x *MethodSettings_LongRunning) Reset() { + *x = MethodSettings_LongRunning{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodSettings_LongRunning) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodSettings_LongRunning) ProtoMessage() {} + +func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodSettings_LongRunning.ProtoReflect.Descriptor instead. 
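+// (Sketch of the schedule implied by the defaults documented above: each
+// delay is min(initial_poll_delay * poll_delay_multiplier^n, max_poll_delay),
+// i.e. 5s, 7.5s, 11.25s, ~16.9s, ~25.3s, ~38s, then 45s thereafter, until
+// total_poll_timeout (5 minutes) elapses.)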
+func (*MethodSettings_LongRunning) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{11, 0} +} + +func (x *MethodSettings_LongRunning) GetInitialPollDelay() *durationpb.Duration { + if x != nil { + return x.InitialPollDelay + } + return nil +} + +func (x *MethodSettings_LongRunning) GetPollDelayMultiplier() float32 { + if x != nil { + return x.PollDelayMultiplier + } + return 0 +} + +func (x *MethodSettings_LongRunning) GetMaxPollDelay() *durationpb.Duration { + if x != nil { + return x.MaxPollDelay + } + return nil +} + +func (x *MethodSettings_LongRunning) GetTotalPollTimeout() *durationpb.Duration { + if x != nil { + return x.TotalPollTimeout + } + return nil +} + +var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: ([]string)(nil), + Field: 1051, + Name: "google.api.method_signature", + Tag: "bytes,1051,rep,name=method_signature", + Filename: "google/api/client.proto", + }, + { + ExtendedType: (*descriptorpb.ServiceOptions)(nil), + ExtensionType: (*string)(nil), + Field: 1049, + Name: "google.api.default_host", + Tag: "bytes,1049,opt,name=default_host", + Filename: "google/api/client.proto", + }, + { + ExtendedType: (*descriptorpb.ServiceOptions)(nil), + ExtensionType: (*string)(nil), + Field: 1050, + Name: "google.api.oauth_scopes", + Tag: "bytes,1050,opt,name=oauth_scopes", + Filename: "google/api/client.proto", + }, +} + +// Extension fields to descriptorpb.MethodOptions. +var ( + // A definition of a client library method signature. + // + // In client libraries, each proto RPC corresponds to one or more methods + // which the end user is able to call, and calls the underlying RPC. + // Normally, this method receives a single argument (a struct or instance + // corresponding to the RPC request object). Defining this field will + // add one or more overloads providing flattened or simpler method signatures + // in some languages. + // + // The fields on the method signature are provided as a comma-separated + // string. + // + // For example, the proto RPC and annotation: + // + // rpc CreateSubscription(CreateSubscriptionRequest) + // returns (Subscription) { + // option (google.api.method_signature) = "name,topic"; + // } + // + // Would add the following Java overload (in addition to the method accepting + // the request object): + // + // public final Subscription createSubscription(String name, String topic) + // + // The following backwards-compatibility guidelines apply: + // + // - Adding this annotation to an unannotated method is backwards + // compatible. + // - Adding this annotation to a method which already has existing + // method signature annotations is backwards compatible if and only if + // the new method signature annotation is last in the sequence. + // - Modifying or removing an existing method signature annotation is + // a breaking change. + // - Re-ordering existing method signature annotations is a breaking + // change. + // + // repeated string method_signature = 1051; + E_MethodSignature = &file_google_api_client_proto_extTypes[0] +) + +// Extension fields to descriptorpb.ServiceOptions. +var ( + // The hostname for this service. + // This should be specified with no prefix or protocol. + // + // Example: + // + // service Foo { + // option (google.api.default_host) = "foo.googleapi.com"; + // ... 
+ // } + // + // optional string default_host = 1049; + E_DefaultHost = &file_google_api_client_proto_extTypes[1] + // OAuth scopes needed for the client. + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform"; + // ... + // } + // + // If there is more than one scope, use a comma-separated string: + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform," + // "https://www.googleapis.com/auth/monitoring"; + // ... + // } + // + // optional string oauth_scopes = 1050; + E_OauthScopes = &file_google_api_client_proto_extTypes[2] +) + +var File_google_api_client_proto protoreflect.FileDescriptor + +var file_google_api_client_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x90, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, + 0x6f, 0x63, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x73, 0x55, 0x72, 0x69, 0x12, + 0x48, 0x0a, 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, + 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x64, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x93, 0x05, 0x0a, 0x15, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, + 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, + 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x73, + 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x73, 0x74, 0x4e, 0x75, 0x6d, 0x65, 0x72, + 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6a, 0x61, 0x76, 
0x61, 0x5f, + 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x63, 0x70, 0x70, 0x5f, 0x73, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, + 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x72, 0x75, 0x62, 0x79, 0x5f, + 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x75, 0x62, 0x79, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x67, 0x6f, 0x5f, 0x73, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, + 0xe0, 0x03, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x43, + 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 
0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, + 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x49, + 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x6f, 0x63, 0x75, 0x6d, + 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x66, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x69, 0x5f, 0x73, 0x68, 0x6f, 0x72, + 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, + 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x68, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x34, 0x0a, + 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x69, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x63, + 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x47, 0x69, 0x74, 0x68, 0x75, 0x62, 0x54, 0x65, + 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, 0x6f, 0x63, 0x5f, 0x74, 0x61, 0x67, 0x5f, 0x70, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x6f, 0x63, + 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x49, 0x0a, 0x0c, 0x6f, 0x72, 0x67, + 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, + 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x6d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x22, 0x9a, 0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, + 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, + 0x62, 0x72, 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 
0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0x49, 0x0a, 0x0b, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, + 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, + 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, + 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, + 0x4c, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, + 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 
0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, + 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x22, 0x8e, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, + 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, + 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x1a, 0x94, 0x02, + 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, + 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, + 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, + 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, + 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, + 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, + 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x2a, 0x79, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, + 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, + 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, + 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53, + 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, + 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 
0x04, 0x2a, + 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, + 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, + 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, + 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, + 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, + 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, + 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x69, + 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_google_api_client_proto_rawDescOnce sync.Once + file_google_api_client_proto_rawDescData = file_google_api_client_proto_rawDesc +) + +func file_google_api_client_proto_rawDescGZIP() []byte { + file_google_api_client_proto_rawDescOnce.Do(func() { + file_google_api_client_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_client_proto_rawDescData) + }) + return file_google_api_client_proto_rawDescData +} + +var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 14) +var file_google_api_client_proto_goTypes = []interface{}{ + (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization + (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination + (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings + 
(*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings + (*Publishing)(nil), // 4: google.api.Publishing + (*JavaSettings)(nil), // 5: google.api.JavaSettings + (*CppSettings)(nil), // 6: google.api.CppSettings + (*PhpSettings)(nil), // 7: google.api.PhpSettings + (*PythonSettings)(nil), // 8: google.api.PythonSettings + (*NodeSettings)(nil), // 9: google.api.NodeSettings + (*DotnetSettings)(nil), // 10: google.api.DotnetSettings + (*RubySettings)(nil), // 11: google.api.RubySettings + (*GoSettings)(nil), // 12: google.api.GoSettings + (*MethodSettings)(nil), // 13: google.api.MethodSettings + nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry + (*MethodSettings_LongRunning)(nil), // 15: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 16: google.api.LaunchStage + (*durationpb.Duration)(nil), // 17: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 18: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 19: google.protobuf.ServiceOptions +} +var file_google_api_client_proto_depIdxs = []int32{ + 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination + 16, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings + 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings + 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings + 8, // 5: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings + 9, // 6: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings + 10, // 7: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings + 11, // 8: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings + 12, // 9: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings + 13, // 10: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings + 0, // 11: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization + 3, // 12: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings + 14, // 13: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry + 2, // 14: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 20: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 21: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 15, // 22: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 17, // 23: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 17, // 24: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 17, // 25: 
google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 18, // 26: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 19, // 27: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 19, // 28: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 29, // [29:29] is the sub-list for method output_type + 29, // [29:29] is the sub-list for method input_type + 29, // [29:29] is the sub-list for extension type_name + 26, // [26:29] is the sub-list for extension extendee + 0, // [0:26] is the sub-list for field type_name +} + +func init() { file_google_api_client_proto_init() } +func file_google_api_client_proto_init() { + if File_google_api_client_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_client_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommonLanguageSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientLibrarySettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Publishing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JavaSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CppSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PhpSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PythonSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DotnetSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RubySettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GoSettings); i { + 
case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodSettings_LongRunning); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_client_proto_rawDesc, + NumEnums: 2, + NumMessages: 14, + NumExtensions: 3, + NumServices: 0, + }, + GoTypes: file_google_api_client_proto_goTypes, + DependencyIndexes: file_google_api_client_proto_depIdxs, + EnumInfos: file_google_api_client_proto_enumTypes, + MessageInfos: file_google_api_client_proto_msgTypes, + ExtensionInfos: file_google_api_client_proto_extTypes, + }.Build() + File_google_api_client_proto = out.File + file_google_api_client_proto_rawDesc = nil + file_google_api_client_proto_goTypes = nil + file_google_api_client_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go new file mode 100644 index 00000000000..164e0df0bf5 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go @@ -0,0 +1,250 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.12.2 +// source: google/api/field_behavior.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// An indicator of the behavior of a given field (for example, that a field +// is required in requests, or given as output but ignored as input). +// This **does not** change the behavior in protocol buffers itself; it only +// denotes the behavior and may affect how API tooling handles the field. +// +// Note: This enum **may** receive new values in the future. +type FieldBehavior int32 + +const ( + // Conventional default for enums. Do not use this. 
+ FieldBehavior_FIELD_BEHAVIOR_UNSPECIFIED FieldBehavior = 0 + // Specifically denotes a field as optional. + // While all fields in protocol buffers are optional, this may be specified + // for emphasis if appropriate. + FieldBehavior_OPTIONAL FieldBehavior = 1 + // Denotes a field as required. + // This indicates that the field **must** be provided as part of the request, + // and failure to do so will cause an error (usually `INVALID_ARGUMENT`). + FieldBehavior_REQUIRED FieldBehavior = 2 + // Denotes a field as output only. + // This indicates that the field is provided in responses, but including the + // field in a request does nothing (the server *must* ignore it and + // *must not* throw an error as a result of the field's presence). + FieldBehavior_OUTPUT_ONLY FieldBehavior = 3 + // Denotes a field as input only. + // This indicates that the field is provided in requests, and the + // corresponding field is not included in output. + FieldBehavior_INPUT_ONLY FieldBehavior = 4 + // Denotes a field as immutable. + // This indicates that the field may be set once in a request to create a + // resource, but may not be changed thereafter. + FieldBehavior_IMMUTABLE FieldBehavior = 5 + // Denotes that a (repeated) field is an unordered list. + // This indicates that the service may provide the elements of the list + // in any arbitrary order, rather than the order the user originally + // provided. Additionally, the list's order may or may not be stable. + FieldBehavior_UNORDERED_LIST FieldBehavior = 6 + // Denotes that this field returns a non-empty default value if not set. + // This indicates that if the user provides the empty value in a request, + // a non-empty value will be returned. The user will not be aware of what + // non-empty value to expect. + FieldBehavior_NON_EMPTY_DEFAULT FieldBehavior = 7 +) + +// Enum value maps for FieldBehavior. +var ( + FieldBehavior_name = map[int32]string{ + 0: "FIELD_BEHAVIOR_UNSPECIFIED", + 1: "OPTIONAL", + 2: "REQUIRED", + 3: "OUTPUT_ONLY", + 4: "INPUT_ONLY", + 5: "IMMUTABLE", + 6: "UNORDERED_LIST", + 7: "NON_EMPTY_DEFAULT", + } + FieldBehavior_value = map[string]int32{ + "FIELD_BEHAVIOR_UNSPECIFIED": 0, + "OPTIONAL": 1, + "REQUIRED": 2, + "OUTPUT_ONLY": 3, + "INPUT_ONLY": 4, + "IMMUTABLE": 5, + "UNORDERED_LIST": 6, + "NON_EMPTY_DEFAULT": 7, + } +) + +func (x FieldBehavior) Enum() *FieldBehavior { + p := new(FieldBehavior) + *p = x + return p +} + +func (x FieldBehavior) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldBehavior) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_field_behavior_proto_enumTypes[0].Descriptor() +} + +func (FieldBehavior) Type() protoreflect.EnumType { + return &file_google_api_field_behavior_proto_enumTypes[0] +} + +func (x FieldBehavior) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FieldBehavior.Descriptor instead. +func (FieldBehavior) EnumDescriptor() ([]byte, []int) { + return file_google_api_field_behavior_proto_rawDescGZIP(), []int{0} +} + +var file_google_api_field_behavior_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: ([]FieldBehavior)(nil), + Field: 1052, + Name: "google.api.field_behavior", + Tag: "varint,1052,rep,name=field_behavior,enum=google.api.FieldBehavior", + Filename: "google/api/field_behavior.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. 
+var ( + // A designation of a specific field behavior (required, output only, etc.) + // in protobuf messages. + // + // Examples: + // + // string name = 1 [(google.api.field_behavior) = REQUIRED]; + // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + // google.protobuf.Duration ttl = 1 + // [(google.api.field_behavior) = INPUT_ONLY]; + // google.protobuf.Timestamp expire_time = 1 + // [(google.api.field_behavior) = OUTPUT_ONLY, + // (google.api.field_behavior) = IMMUTABLE]; + // + // repeated google.api.FieldBehavior field_behavior = 1052; + E_FieldBehavior = &file_google_api_field_behavior_proto_extTypes[0] +) + +var File_google_api_field_behavior_proto protoreflect.FileDescriptor + +var file_google_api_field_behavior_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a, + 0xa6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, + 0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56, + 0x49, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, + 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0f, 0x0a, + 0x0b, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x03, 0x12, 0x0e, + 0x0a, 0x0a, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x04, 0x12, 0x0d, + 0x0a, 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x12, 0x0a, + 0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, + 0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44, + 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x52, 0x0d, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x70, 0x0a, 0x0e, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x12, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 
0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_field_behavior_proto_rawDescOnce sync.Once + file_google_api_field_behavior_proto_rawDescData = file_google_api_field_behavior_proto_rawDesc +) + +func file_google_api_field_behavior_proto_rawDescGZIP() []byte { + file_google_api_field_behavior_proto_rawDescOnce.Do(func() { + file_google_api_field_behavior_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_behavior_proto_rawDescData) + }) + return file_google_api_field_behavior_proto_rawDescData +} + +var file_google_api_field_behavior_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_field_behavior_proto_goTypes = []interface{}{ + (FieldBehavior)(0), // 0: google.api.FieldBehavior + (*descriptorpb.FieldOptions)(nil), // 1: google.protobuf.FieldOptions +} +var file_google_api_field_behavior_proto_depIdxs = []int32{ + 1, // 0: google.api.field_behavior:extendee -> google.protobuf.FieldOptions + 0, // 1: google.api.field_behavior:type_name -> google.api.FieldBehavior + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_api_field_behavior_proto_init() } +func file_google_api_field_behavior_proto_init() { + if File_google_api_field_behavior_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_field_behavior_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_field_behavior_proto_goTypes, + DependencyIndexes: file_google_api_field_behavior_proto_depIdxs, + EnumInfos: file_google_api_field_behavior_proto_enumTypes, + ExtensionInfos: file_google_api_field_behavior_proto_extTypes, + }.Build() + File_google_api_field_behavior_proto = out.File + file_google_api_field_behavior_proto_rawDesc = nil + file_google_api_field_behavior_proto_goTypes = nil + file_google_api_field_behavior_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go new file mode 100644 index 00000000000..6f11b7c500f --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -0,0 +1,777 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.2 +// source: google/api/http.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Defines the HTTP configuration for an API service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +type Http struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` + // When set to true, URL path parameters will be fully URI-decoded except in + // cases of single segment matches in reserved expansion, where "%2F" will be + // left encoded. + // + // The default behavior is to not decode RFC 6570 reserved characters in multi + // segment matches. + FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"` +} + +func (x *Http) Reset() { + *x = Http{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_http_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Http) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Http) ProtoMessage() {} + +func (x *Http) ProtoReflect() protoreflect.Message { + mi := &file_google_api_http_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Http.ProtoReflect.Descriptor instead. +func (*Http) Descriptor() ([]byte, []int) { + return file_google_api_http_proto_rawDescGZIP(), []int{0} +} + +func (x *Http) GetRules() []*HttpRule { + if x != nil { + return x.Rules + } + return nil +} + +func (x *Http) GetFullyDecodeReservedExpansion() bool { + if x != nil { + return x.FullyDecodeReservedExpansion + } + return false +} + +// # gRPC Transcoding +// +// gRPC Transcoding is a feature for mapping between a gRPC method and one or +// more HTTP REST endpoints. It allows developers to build a single API service +// that supports both gRPC APIs and REST APIs. Many systems, including [Google +// APIs](https://github.com/googleapis/googleapis), +// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), +// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature +// and use it for large scale production services. +// +// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies +// how different portions of the gRPC request message are mapped to the URL +// path, URL query parameters, and HTTP request body. It also controls how the +// gRPC response message is mapped to the HTTP response body. 
`HttpRule` is +// typically specified as a `google.api.http` annotation on the gRPC method. +// +// Each mapping specifies a URL path template and an HTTP method. The path +// template may refer to one or more fields in the gRPC request message, as long +// as each field is a non-repeated field with a primitive (non-message) type. +// The path template controls how fields of the request message are mapped to +// the URL path. +// +// Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/{name=messages/*}" +// }; +// } +// } +// message GetMessageRequest { +// string name = 1; // Mapped to URL path. +// } +// message Message { +// string text = 1; // The resource content. +// } +// +// This enables an HTTP REST to gRPC mapping as below: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +// +// Any fields in the request message which are not bound by the path template +// automatically become HTTP query parameters if there is no HTTP request body. +// For example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get:"/v1/messages/{message_id}" +// }; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // Mapped to URL path. +// int64 revision = 2; // Mapped to URL query parameter `revision`. +// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. +// } +// +// This enables an HTTP JSON to RPC mapping as below: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | +// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: +// "foo"))` +// +// Note that fields which are mapped to URL query parameters must have a +// primitive type or a repeated primitive type or a non-repeated message type. +// In the case of a repeated type, the parameter can be repeated in the URL +// as `...?param=A&param=B`. In the case of a message type, each field of the +// message is mapped to a separate parameter, such as +// `...?foo.a=A&foo.b=B&foo.c=C`. +// +// For HTTP methods that allow a request body, the `body` field +// specifies the mapping. Consider a REST update method on the +// message resource collection: +// +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } +// +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: +// +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" message { text: "Hi!" })` +// +// The special name `*` can be used in the body mapping to define that +// every field not bound by the path template should be mapped to the +// request body.
This enables the following alternative definition of +// the update method: +// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } +// +// The following HTTP JSON to RPC mapping is enabled: +// +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" text: "Hi!")` +// +// Note that when using `*` in the body mapping, it is not possible to +// have HTTP parameters, as all fields not bound by the path end in +// the body. This makes this option more rarely used in practice when +// defining REST APIs. The common usage of `*` is in custom methods +// which don't use the URL at all for transferring data. +// +// It is possible to define multiple HTTP methods for one RPC by using +// the `additional_bindings` option. Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } +// +// This enables the following two alternative HTTP JSON to RPC mappings: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: +// "123456")` +// +// ## Rules for HTTP mapping +// +// 1. Leaf request fields (recursive expansion nested messages in the request +// message) are classified into three categories: +// - Fields referred by the path template. They are passed via the URL path. +// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP +// request body. +// - All other fields are passed via the URL query parameters, and the +// parameter name is the field path in the request message. A repeated +// field can be represented as multiple query parameters under the same +// name. +// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields +// are passed via URL path and HTTP request body. +// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all +// fields are passed via URL path and URL query parameters. +// +// ### Path template syntax +// +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; +// +// The syntax `*` matches a single URL path segment. The syntax `**` matches +// zero or more URL path segments, which must be the last part of the URL path +// except the `Verb`. +// +// The syntax `Variable` matches part of the URL path as specified by its +// template. A variable template must not contain other variables. If a variable +// matches a single path segment, its template may be omitted, e.g. `{var}` +// is equivalent to `{var=*}`. +// +// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` +// contains any reserved character, such characters should be percent-encoded +// before the matching. 
+// +// If a variable contains exactly one path segment, such as `"{var}"` or +// `"{var=*}"`, when such a variable is expanded into a URL path on the client +// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The +// server side does the reverse decoding. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{var}`. +// +// If a variable contains multiple path segments, such as `"{var=foo/*}"` +// or `"{var=**}"`, when such a variable is expanded into a URL path on the +// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. +// The server side does the reverse decoding, except "%2F" and "%2f" are left +// unchanged. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{+var}`. +// +// ## Using gRPC API Service Configuration +// +// gRPC API Service Configuration (service config) is a configuration language +// for configuring a gRPC service to become a user-facing product. The +// service config is simply the YAML representation of the `google.api.Service` +// proto message. +// +// As an alternative to annotating your proto file, you can configure gRPC +// transcoding in your service config YAML files. You do this by specifying a +// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same +// effect as the proto annotation. This can be particularly useful if you +// have a proto that is reused in multiple services. Note that any transcoding +// specified in the service config will override any matching transcoding +// configuration in the proto. +// +// Example: +// +// http: +// rules: +// # Selects a gRPC method and applies HttpRule to it. +// - selector: example.v1.Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} +// +// ## Special notes +// +// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the +// proto to JSON conversion must follow the [proto3 +// specification](https://developers.google.com/protocol-buffers/docs/proto3#json). +// +// While the single segment variable follows the semantics of +// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String +// Expansion, the multi segment variable **does not** follow RFC 6570 Section +// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion +// does not expand special characters like `?` and `#`, which would lead +// to invalid URLs. As a result, gRPC Transcoding uses a custom encoding +// for multi segment variables. +// +// The path variables **must not** refer to any repeated or mapped field, +// because client libraries are not capable of handling such variable expansion. +// +// The path variables **must not** capture the leading "/" character. The reason +// is that the most common use case "{var}" does not capture the leading "/" +// character. For consistency, all path variables must share the same behavior. +// +// Repeated message fields must not be mapped to URL query parameters, because +// no client library can support such complicated mapping. +// +// If an API needs to use a JSON array for request or response body, it can map +// the request or response body to a repeated field. However, some gRPC +// Transcoding implementations may not support this feature. +type HttpRule struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Selects a method to which this rule applies.
+ // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` + // Determines the URL pattern that is matched by this rule. This pattern can be + // used with any of the {get|put|post|delete|patch} methods. A custom method + // can be defined using the 'custom' field. + // + // Types that are assignable to Pattern: + // *HttpRule_Get + // *HttpRule_Put + // *HttpRule_Post + // *HttpRule_Delete + // *HttpRule_Patch + // *HttpRule_Custom + Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"` + // The name of the request field whose value is mapped to the HTTP request + // body, or `*` for mapping all request fields not captured by the path + // pattern to the HTTP body, or omitted for not having any HTTP request body. + // + // NOTE: the referred field must be present at the top-level of the request + // message type. + Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"` + // Optional. The name of the response field whose value is mapped to the HTTP + // response body. When omitted, the entire response message will be used + // as the HTTP response body. + // + // NOTE: The referred field must be present at the top-level of the response + // message type. + ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"` + // Additional HTTP bindings for the selector. Nested bindings must + // not contain an `additional_bindings` field themselves (that is, + // the nesting may only be one level deep). + AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"` +} + +func (x *HttpRule) Reset() { + *x = HttpRule{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_http_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpRule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpRule) ProtoMessage() {} + +func (x *HttpRule) ProtoReflect() protoreflect.Message { + mi := &file_google_api_http_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpRule.ProtoReflect.Descriptor instead.
+func (*HttpRule) Descriptor() ([]byte, []int) { + return file_google_api_http_proto_rawDescGZIP(), []int{1} +} + +func (x *HttpRule) GetSelector() string { + if x != nil { + return x.Selector + } + return "" +} + +func (m *HttpRule) GetPattern() isHttpRule_Pattern { + if m != nil { + return m.Pattern + } + return nil +} + +func (x *HttpRule) GetGet() string { + if x, ok := x.GetPattern().(*HttpRule_Get); ok { + return x.Get + } + return "" +} + +func (x *HttpRule) GetPut() string { + if x, ok := x.GetPattern().(*HttpRule_Put); ok { + return x.Put + } + return "" +} + +func (x *HttpRule) GetPost() string { + if x, ok := x.GetPattern().(*HttpRule_Post); ok { + return x.Post + } + return "" +} + +func (x *HttpRule) GetDelete() string { + if x, ok := x.GetPattern().(*HttpRule_Delete); ok { + return x.Delete + } + return "" +} + +func (x *HttpRule) GetPatch() string { + if x, ok := x.GetPattern().(*HttpRule_Patch); ok { + return x.Patch + } + return "" +} + +func (x *HttpRule) GetCustom() *CustomHttpPattern { + if x, ok := x.GetPattern().(*HttpRule_Custom); ok { + return x.Custom + } + return nil +} + +func (x *HttpRule) GetBody() string { + if x != nil { + return x.Body + } + return "" +} + +func (x *HttpRule) GetResponseBody() string { + if x != nil { + return x.ResponseBody + } + return "" +} + +func (x *HttpRule) GetAdditionalBindings() []*HttpRule { + if x != nil { + return x.AdditionalBindings + } + return nil +} + +type isHttpRule_Pattern interface { + isHttpRule_Pattern() +} + +type HttpRule_Get struct { + // Maps to HTTP GET. Used for listing and getting information about + // resources. + Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof"` +} + +type HttpRule_Put struct { + // Maps to HTTP PUT. Used for replacing a resource. + Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"` +} + +type HttpRule_Post struct { + // Maps to HTTP POST. Used for creating a resource or performing an action. + Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"` +} + +type HttpRule_Delete struct { + // Maps to HTTP DELETE. Used for deleting a resource. + Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"` +} + +type HttpRule_Patch struct { + // Maps to HTTP PATCH. Used for updating a resource. + Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"` +} + +type HttpRule_Custom struct { + // The custom pattern is used for specifying an HTTP method that is not + // included in the `pattern` field, such as HEAD, or "*" to leave the + // HTTP method unspecified for this rule. The wild-card rule is useful + // for services that provide content to Web (HTML) clients. + Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"` +} + +func (*HttpRule_Get) isHttpRule_Pattern() {} + +func (*HttpRule_Put) isHttpRule_Pattern() {} + +func (*HttpRule_Post) isHttpRule_Pattern() {} + +func (*HttpRule_Delete) isHttpRule_Pattern() {} + +func (*HttpRule_Patch) isHttpRule_Pattern() {} + +func (*HttpRule_Custom) isHttpRule_Pattern() {} + +// A custom pattern is used for defining a custom HTTP verb. +type CustomHttpPattern struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of this custom HTTP verb. + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + // The path matched by this custom verb.
+ Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` +} + +func (x *CustomHttpPattern) Reset() { + *x = CustomHttpPattern{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_http_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomHttpPattern) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomHttpPattern) ProtoMessage() {} + +func (x *CustomHttpPattern) ProtoReflect() protoreflect.Message { + mi := &file_google_api_http_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomHttpPattern.ProtoReflect.Descriptor instead. +func (*CustomHttpPattern) Descriptor() ([]byte, []int) { + return file_google_api_http_proto_rawDescGZIP(), []int{2} +} + +func (x *CustomHttpPattern) GetKind() string { + if x != nil { + return x.Kind + } + return "" +} + +func (x *CustomHttpPattern) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +var File_google_api_http_proto protoreflect.FileDescriptor + +var file_google_api_http_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, + 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x22, 0x79, 0x0a, 0x04, 0x48, 0x74, 0x74, 0x70, 0x12, 0x2a, 0x0a, 0x05, 0x72, + 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, + 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x1f, 0x66, 0x75, 0x6c, 0x6c, 0x79, + 0x5f, 0x64, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x5f, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x1c, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xda, + 0x02, 0x0a, 0x08, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x03, 0x70, + 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12, + 0x14, 0x0a, 0x04, 0x70, 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x04, 0x70, 0x6f, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, + 0x16, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, + 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, + 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x12, 
0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x62, 0x6f, 0x64, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x13, 0x61, 0x64, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x12, 0x61, 0x64, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, + 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x22, 0x3b, 0x0a, 0x11, 0x43, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, + 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x6a, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x09, 0x48, 0x74, 0x74, 0x70, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, + 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_http_proto_rawDescOnce sync.Once + file_google_api_http_proto_rawDescData = file_google_api_http_proto_rawDesc +) + +func file_google_api_http_proto_rawDescGZIP() []byte { + file_google_api_http_proto_rawDescOnce.Do(func() { + file_google_api_http_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_http_proto_rawDescData) + }) + return file_google_api_http_proto_rawDescData +} + +var file_google_api_http_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_google_api_http_proto_goTypes = []interface{}{ + (*Http)(nil), // 0: google.api.Http + (*HttpRule)(nil), // 1: google.api.HttpRule + (*CustomHttpPattern)(nil), // 2: google.api.CustomHttpPattern +} +var file_google_api_http_proto_depIdxs = []int32{ + 1, // 0: google.api.Http.rules:type_name -> google.api.HttpRule + 2, // 1: google.api.HttpRule.custom:type_name -> google.api.CustomHttpPattern + 1, // 2: google.api.HttpRule.additional_bindings:type_name -> google.api.HttpRule + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_google_api_http_proto_init() } +func file_google_api_http_proto_init() { + if File_google_api_http_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_http_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Http); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + 
return &v.unknownFields + default: + return nil + } + } + file_google_api_http_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpRule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_http_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomHttpPattern); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_api_http_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*HttpRule_Get)(nil), + (*HttpRule_Put)(nil), + (*HttpRule_Post)(nil), + (*HttpRule_Delete)(nil), + (*HttpRule_Patch)(nil), + (*HttpRule_Custom)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_http_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_http_proto_goTypes, + DependencyIndexes: file_google_api_http_proto_depIdxs, + MessageInfos: file_google_api_http_proto_msgTypes, + }.Build() + File_google_api_http_proto = out.File + file_google_api_http_proto_rawDesc = nil + file_google_api_http_proto_goTypes = nil + file_google_api_http_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go new file mode 100644 index 00000000000..13ea54b2940 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -0,0 +1,655 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.2 +// source: google/api/resource.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A description of the historical or future-looking state of the +// resource pattern. +type ResourceDescriptor_History int32 + +const ( + // The "unset" value. + ResourceDescriptor_HISTORY_UNSPECIFIED ResourceDescriptor_History = 0 + // The resource originally had one pattern and launched as such, and + // additional patterns were added later. + ResourceDescriptor_ORIGINALLY_SINGLE_PATTERN ResourceDescriptor_History = 1 + // The resource has one pattern, but the API owner expects to add more + // later. 
(This is the inverse of ORIGINALLY_SINGLE_PATTERN, and prevents + // that from being necessary once there are multiple patterns.) + ResourceDescriptor_FUTURE_MULTI_PATTERN ResourceDescriptor_History = 2 +) + +// Enum value maps for ResourceDescriptor_History. +var ( + ResourceDescriptor_History_name = map[int32]string{ + 0: "HISTORY_UNSPECIFIED", + 1: "ORIGINALLY_SINGLE_PATTERN", + 2: "FUTURE_MULTI_PATTERN", + } + ResourceDescriptor_History_value = map[string]int32{ + "HISTORY_UNSPECIFIED": 0, + "ORIGINALLY_SINGLE_PATTERN": 1, + "FUTURE_MULTI_PATTERN": 2, + } +) + +func (x ResourceDescriptor_History) Enum() *ResourceDescriptor_History { + p := new(ResourceDescriptor_History) + *p = x + return p +} + +func (x ResourceDescriptor_History) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ResourceDescriptor_History) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_resource_proto_enumTypes[0].Descriptor() +} + +func (ResourceDescriptor_History) Type() protoreflect.EnumType { + return &file_google_api_resource_proto_enumTypes[0] +} + +func (x ResourceDescriptor_History) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ResourceDescriptor_History.Descriptor instead. +func (ResourceDescriptor_History) EnumDescriptor() ([]byte, []int) { + return file_google_api_resource_proto_rawDescGZIP(), []int{0, 0} +} + +// A flag representing a specific style that a resource claims to conform to. +type ResourceDescriptor_Style int32 + +const ( + // The unspecified value. Do not use. + ResourceDescriptor_STYLE_UNSPECIFIED ResourceDescriptor_Style = 0 + // This resource is intended to be "declarative-friendly". + // + // Declarative-friendly resources must be more strictly consistent, and + // setting this to true communicates to tools that this resource should + // adhere to declarative-friendly expectations. + // + // Note: This is used by the API linter (linter.aip.dev) to enable + // additional checks. + ResourceDescriptor_DECLARATIVE_FRIENDLY ResourceDescriptor_Style = 1 +) + +// Enum value maps for ResourceDescriptor_Style. +var ( + ResourceDescriptor_Style_name = map[int32]string{ + 0: "STYLE_UNSPECIFIED", + 1: "DECLARATIVE_FRIENDLY", + } + ResourceDescriptor_Style_value = map[string]int32{ + "STYLE_UNSPECIFIED": 0, + "DECLARATIVE_FRIENDLY": 1, + } +) + +func (x ResourceDescriptor_Style) Enum() *ResourceDescriptor_Style { + p := new(ResourceDescriptor_Style) + *p = x + return p +} + +func (x ResourceDescriptor_Style) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ResourceDescriptor_Style) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_resource_proto_enumTypes[1].Descriptor() +} + +func (ResourceDescriptor_Style) Type() protoreflect.EnumType { + return &file_google_api_resource_proto_enumTypes[1] +} + +func (x ResourceDescriptor_Style) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ResourceDescriptor_Style.Descriptor instead. +func (ResourceDescriptor_Style) EnumDescriptor() ([]byte, []int) { + return file_google_api_resource_proto_rawDescGZIP(), []int{0, 1} +} + +// A simple descriptor of a resource type. +// +// ResourceDescriptor annotates a resource message (either by means of a +// protobuf annotation or use in the service config), and associates the +// resource's schema, the resource type, and the pattern of the resource name. 
+// +// Example: +// +// message Topic { +// // Indicates this message defines a resource schema. +// // Declares the resource type in the format of {service}/{kind}. +// // For Kubernetes resources, the format is {api group}/{kind}. +// option (google.api.resource) = { +// type: "pubsub.googleapis.com/Topic" +// pattern: "projects/{project}/topics/{topic}" +// }; +// } +// +// The ResourceDescriptor Yaml config will look like: +// +// resources: +// - type: "pubsub.googleapis.com/Topic" +// pattern: "projects/{project}/topics/{topic}" +// +// Sometimes, resources have multiple patterns, typically because they can +// live under multiple parents. +// +// Example: +// +// message LogEntry { +// option (google.api.resource) = { +// type: "logging.googleapis.com/LogEntry" +// pattern: "projects/{project}/logs/{log}" +// pattern: "folders/{folder}/logs/{log}" +// pattern: "organizations/{organization}/logs/{log}" +// pattern: "billingAccounts/{billing_account}/logs/{log}" +// }; +// } +// +// The ResourceDescriptor Yaml config will look like: +// +// resources: +// - type: 'logging.googleapis.com/LogEntry' +// pattern: "projects/{project}/logs/{log}" +// pattern: "folders/{folder}/logs/{log}" +// pattern: "organizations/{organization}/logs/{log}" +// pattern: "billingAccounts/{billing_account}/logs/{log}" +type ResourceDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource type. It must be in the format of + // {service_name}/{resource_type_kind}. The `resource_type_kind` must be + // singular and must not include version numbers. + // + // Example: `storage.googleapis.com/Bucket` + // + // The value of the resource_type_kind must follow the regular expression + // /[A-Za-z][a-zA-Z0-9]+/. It should start with an upper case character and + // should use PascalCase (UpperCamelCase). The maximum number of + // characters allowed for the `resource_type_kind` is 100. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Optional. The relative resource name pattern associated with this resource + // type. The DNS prefix of the full resource name shouldn't be specified here. + // + // The path pattern must follow the syntax, which aligns with HTTP binding + // syntax: + // + // Template = Segment { "/" Segment } ; + // Segment = LITERAL | Variable ; + // Variable = "{" LITERAL "}" ; + // + // Examples: + // + // - "projects/{project}/topics/{topic}" + // - "projects/{project}/knowledgeBases/{knowledge_base}" + // + // The components in braces correspond to the IDs for each resource in the + // hierarchy. It is expected that, if multiple patterns are provided, + // the same component name (e.g. "project") refers to IDs of the same + // type of resource. + Pattern []string `protobuf:"bytes,2,rep,name=pattern,proto3" json:"pattern,omitempty"` + // Optional. The field on the resource that designates the resource name + // field. If omitted, this is assumed to be "name". + NameField string `protobuf:"bytes,3,opt,name=name_field,json=nameField,proto3" json:"name_field,omitempty"` + // Optional. The historical or future-looking state of the resource pattern. + // + // Example: + // + // // The InspectTemplate message originally only supported resource + // // names with organization, and project was added later. 
+ // message InspectTemplate { + // option (google.api.resource) = { + // type: "dlp.googleapis.com/InspectTemplate" + // pattern: + // "organizations/{organization}/inspectTemplates/{inspect_template}" + // pattern: "projects/{project}/inspectTemplates/{inspect_template}" + // history: ORIGINALLY_SINGLE_PATTERN + // }; + // } + History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"` + // The plural name used in the resource name and permission names, such as + // 'projects' for the resource name of 'projects/{project}' and the permission + // name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same + // concept of the `plural` field in k8s CRD spec + // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ + // + // Note: The plural form is required even for singleton resources. See + // https://aip.dev/156 + Plural string `protobuf:"bytes,5,opt,name=plural,proto3" json:"plural,omitempty"` + // The same concept of the `singular` field in k8s CRD spec + // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ + // Such as "project" for the `resourcemanager.googleapis.com/Project` type. + Singular string `protobuf:"bytes,6,opt,name=singular,proto3" json:"singular,omitempty"` + // Style flag(s) for this resource. + // These indicate that a resource is expected to conform to a given + // style. See the specific style flags for additional information. + Style []ResourceDescriptor_Style `protobuf:"varint,10,rep,packed,name=style,proto3,enum=google.api.ResourceDescriptor_Style" json:"style,omitempty"` +} + +func (x *ResourceDescriptor) Reset() { + *x = ResourceDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_resource_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceDescriptor) ProtoMessage() {} + +func (x *ResourceDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_api_resource_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceDescriptor.ProtoReflect.Descriptor instead. 
+func (*ResourceDescriptor) Descriptor() ([]byte, []int) { + return file_google_api_resource_proto_rawDescGZIP(), []int{0} +} + +func (x *ResourceDescriptor) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *ResourceDescriptor) GetPattern() []string { + if x != nil { + return x.Pattern + } + return nil +} + +func (x *ResourceDescriptor) GetNameField() string { + if x != nil { + return x.NameField + } + return "" +} + +func (x *ResourceDescriptor) GetHistory() ResourceDescriptor_History { + if x != nil { + return x.History + } + return ResourceDescriptor_HISTORY_UNSPECIFIED +} + +func (x *ResourceDescriptor) GetPlural() string { + if x != nil { + return x.Plural + } + return "" +} + +func (x *ResourceDescriptor) GetSingular() string { + if x != nil { + return x.Singular + } + return "" +} + +func (x *ResourceDescriptor) GetStyle() []ResourceDescriptor_Style { + if x != nil { + return x.Style + } + return nil +} + +// Defines a proto annotation that describes a string field that refers to +// an API resource. +type ResourceReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource type that the annotated field references. + // + // Example: + // + // message Subscription { + // string topic = 2 [(google.api.resource_reference) = { + // type: "pubsub.googleapis.com/Topic" + // }]; + // } + // + // Occasionally, a field may reference an arbitrary resource. In this case, + // APIs use the special value * in their resource reference. + // + // Example: + // + // message GetIamPolicyRequest { + // string resource = 2 [(google.api.resource_reference) = { + // type: "*" + // }]; + // } + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The resource type of a child collection that the annotated field + // references. This is useful for annotating the `parent` field that + // doesn't have a fixed resource type. + // + // Example: + // + // message ListLogEntriesRequest { + // string parent = 1 [(google.api.resource_reference) = { + // child_type: "logging.googleapis.com/LogEntry" + // }; + // } + ChildType string `protobuf:"bytes,2,opt,name=child_type,json=childType,proto3" json:"child_type,omitempty"` +} + +func (x *ResourceReference) Reset() { + *x = ResourceReference{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_resource_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceReference) ProtoMessage() {} + +func (x *ResourceReference) ProtoReflect() protoreflect.Message { + mi := &file_google_api_resource_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceReference.ProtoReflect.Descriptor instead. 
+func (*ResourceReference) Descriptor() ([]byte, []int) { + return file_google_api_resource_proto_rawDescGZIP(), []int{1} +} + +func (x *ResourceReference) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *ResourceReference) GetChildType() string { + if x != nil { + return x.ChildType + } + return "" +} + +var file_google_api_resource_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*ResourceReference)(nil), + Field: 1055, + Name: "google.api.resource_reference", + Tag: "bytes,1055,opt,name=resource_reference", + Filename: "google/api/resource.proto", + }, + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: ([]*ResourceDescriptor)(nil), + Field: 1053, + Name: "google.api.resource_definition", + Tag: "bytes,1053,rep,name=resource_definition", + Filename: "google/api/resource.proto", + }, + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*ResourceDescriptor)(nil), + Field: 1053, + Name: "google.api.resource", + Tag: "bytes,1053,opt,name=resource", + Filename: "google/api/resource.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. +var ( + // An annotation that describes a resource reference, see + // [ResourceReference][]. + // + // optional google.api.ResourceReference resource_reference = 1055; + E_ResourceReference = &file_google_api_resource_proto_extTypes[0] +) + +// Extension fields to descriptorpb.FileOptions. +var ( + // An annotation that describes a resource definition without a corresponding + // message; see [ResourceDescriptor][]. + // + // repeated google.api.ResourceDescriptor resource_definition = 1053; + E_ResourceDefinition = &file_google_api_resource_proto_extTypes[1] +) + +// Extension fields to descriptorpb.MessageOptions. +var ( + // An annotation that describes a resource definition, see + // [ResourceDescriptor][]. 
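+	//
+	// As an illustrative sketch (not part of this generated file): a consuming
+	// package can read this annotation back from a message descriptor using
+	// proto.GetExtension from google.golang.org/protobuf/proto, e.g.
+	//
+	//	opts := desc.Options().(*descriptorpb.MessageOptions)
+	//	rd, _ := proto.GetExtension(opts, annotations.E_Resource).(*annotations.ResourceDescriptor)
+	//
+	// where desc is a protoreflect.MessageDescriptor and annotations is this
+	// package's import name.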
+ // + // optional google.api.ResourceDescriptor resource = 1053; + E_Resource = &file_google_api_resource_proto_extTypes[2] +) + +var File_google_api_resource_proto protoreflect.FileDescriptor + +var file_google_api_resource_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xaa, 0x03, 0x0a, 0x12, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1d, + 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x40, 0x0a, + 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x48, + 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x72, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x70, 0x6c, 0x75, 0x72, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x67, 0x75, + 0x6c, 0x61, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x69, 0x6e, 0x67, 0x75, + 0x6c, 0x61, 0x72, 0x12, 0x3a, 0x0a, 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x79, 0x6c, 0x65, 0x52, 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x22, + 0x5b, 0x0a, 0x07, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x13, 0x48, 0x49, + 0x53, 0x54, 0x4f, 0x52, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x4c, + 0x59, 0x5f, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x5f, 0x50, 0x41, 0x54, 0x54, 0x45, 0x52, 0x4e, + 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x46, 0x55, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x4d, 0x55, 0x4c, + 0x54, 0x49, 0x5f, 0x50, 0x41, 0x54, 0x54, 0x45, 0x52, 0x4e, 0x10, 0x02, 0x22, 0x38, 0x0a, 0x05, + 0x53, 0x74, 0x79, 0x6c, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x59, 0x4c, 0x45, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, + 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x46, 0x52, 0x49, 0x45, + 0x4e, 0x44, 0x4c, 0x59, 0x10, 0x01, 0x22, 0x46, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 
0x74, 0x79, 0x70, 0x65, 0x12, + 0x1d, 0x0a, 0x0a, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x6c, + 0x0a, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x9f, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x11, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x3a, 0x6e, 0x0a, 0x13, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x9d, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x5c, 0x0a, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9d, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x6e, 0x0a, 0x0e, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, + 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_google_api_resource_proto_rawDescOnce sync.Once + file_google_api_resource_proto_rawDescData = file_google_api_resource_proto_rawDesc +) + +func file_google_api_resource_proto_rawDescGZIP() []byte { + file_google_api_resource_proto_rawDescOnce.Do(func() { + file_google_api_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_resource_proto_rawDescData) + }) + return file_google_api_resource_proto_rawDescData +} + +var file_google_api_resource_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_google_api_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_google_api_resource_proto_goTypes = 
[]interface{}{ + (ResourceDescriptor_History)(0), // 0: google.api.ResourceDescriptor.History + (ResourceDescriptor_Style)(0), // 1: google.api.ResourceDescriptor.Style + (*ResourceDescriptor)(nil), // 2: google.api.ResourceDescriptor + (*ResourceReference)(nil), // 3: google.api.ResourceReference + (*descriptorpb.FieldOptions)(nil), // 4: google.protobuf.FieldOptions + (*descriptorpb.FileOptions)(nil), // 5: google.protobuf.FileOptions + (*descriptorpb.MessageOptions)(nil), // 6: google.protobuf.MessageOptions +} +var file_google_api_resource_proto_depIdxs = []int32{ + 0, // 0: google.api.ResourceDescriptor.history:type_name -> google.api.ResourceDescriptor.History + 1, // 1: google.api.ResourceDescriptor.style:type_name -> google.api.ResourceDescriptor.Style + 4, // 2: google.api.resource_reference:extendee -> google.protobuf.FieldOptions + 5, // 3: google.api.resource_definition:extendee -> google.protobuf.FileOptions + 6, // 4: google.api.resource:extendee -> google.protobuf.MessageOptions + 3, // 5: google.api.resource_reference:type_name -> google.api.ResourceReference + 2, // 6: google.api.resource_definition:type_name -> google.api.ResourceDescriptor + 2, // 7: google.api.resource:type_name -> google.api.ResourceDescriptor + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 5, // [5:8] is the sub-list for extension type_name + 2, // [2:5] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_google_api_resource_proto_init() } +func file_google_api_resource_proto_init() { + if File_google_api_resource_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_resource_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_resource_proto_rawDesc, + NumEnums: 2, + NumMessages: 2, + NumExtensions: 3, + NumServices: 0, + }, + GoTypes: file_google_api_resource_proto_goTypes, + DependencyIndexes: file_google_api_resource_proto_depIdxs, + EnumInfos: file_google_api_resource_proto_enumTypes, + MessageInfos: file_google_api_resource_proto_msgTypes, + ExtensionInfos: file_google_api_resource_proto_extTypes, + }.Build() + File_google_api_resource_proto = out.File + file_google_api_resource_proto_rawDesc = nil + file_google_api_resource_proto_goTypes = nil + file_google_api_resource_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go new file mode 100644 index 00000000000..6707a7b1c1d --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go @@ -0,0 +1,693 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+//	protoc-gen-go v1.26.0
+//	protoc        v3.12.2
+// source: google/api/routing.proto
+
+package annotations
+
+import (
+	reflect "reflect"
+	sync "sync"
+
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Specifies the routing information that should be sent along with the request
+// in the form of a routing header.
+// **NOTE:** All service configuration rules follow the "last one wins" order.
+//
+// The examples below apply to an RPC which has the following request type:
+//
+// Message Definition:
+//
+//	message Request {
+//	  // The name of the Table
+//	  // Values can be of the following formats:
+//	  //  - `projects/<project>/tables/<table>`
+//	  //  - `projects/<project>/instances/<instance>/tables/<table>`
+//	  //  - `region/<region>/zones/<zone>/tables/<table>`
+//	  string table_name = 1;
+//
+//	  // This value specifies routing for replication.
+//	  // It can be in the following formats:
+//	  //  - `profiles/<profile_id>`
+//	  //  - a legacy `profile_id` that can be any string
+//	  string app_profile_id = 2;
+//	}
+//
+// Example message:
+//
+//	{
+//	  table_name: projects/proj_foo/instances/instance_bar/table/table_baz,
+//	  app_profile_id: profiles/prof_qux
+//	}
+//
+// The routing header consists of one or multiple key-value pairs. Every key
+// and value must be percent-encoded, and joined together in the format of
+// `key1=value1&key2=value2`.
+// The examples below omit the percent-encoding for readability.
+//
+// # Example 1
+//
+// Extracting a field from the request to put into the routing header
+// unchanged, with the key equal to the field name.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // Take the `app_profile_id`.
+//	  routing_parameters {
+//	    field: "app_profile_id"
+//	  }
+//	};
+//
+// result:
+//
+//	x-goog-request-params: app_profile_id=profiles/prof_qux
+//
+// # Example 2
+//
+// Extracting a field from the request to put into the routing header
+// unchanged, with the key different from the field name.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // Take the `app_profile_id`, but name it `routing_id` in the header.
+//	  routing_parameters {
+//	    field: "app_profile_id"
+//	    path_template: "{routing_id=**}"
+//	  }
+//	};
+//
+// result:
+//
+//	x-goog-request-params: routing_id=profiles/prof_qux
+//
+// # Example 3
+//
+// Extracting a field from the request to put into the routing
+// header, while matching a path template syntax on the field's value.
+//
+// NB: it is more useful to send nothing than to send garbage for the purpose
+// of dynamic routing, since garbage pollutes the cache. Thus the matching.
+//
+// # Sub-example 3a
+//
+// The field matches the template.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // Take the `table_name`, if it's well-formed (with project-based
+//	  // syntax).
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{table_name=projects/*/instances/*/**}"
+//	  }
+//	};
+//
+// result:
+//
+//	x-goog-request-params:
+//	table_name=projects/proj_foo/instances/instance_bar/table/table_baz
+//
+// # Sub-example 3b
+//
+// The field does not match the template.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // Take the `table_name`, if it's well-formed (with region-based
+//	  // syntax).
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{table_name=regions/*/zones/*/**}"
+//	  }
+//	};
+//
+// result:
+//
+//
+// # Sub-example 3c
+//
+// Multiple alternative conflictingly named path templates are
+// specified. The one that matches is used to construct the header.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // Take the `table_name`, if it's well-formed, whether
+//	  // using the region- or projects-based syntax.
+//
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{table_name=regions/*/zones/*/**}"
+//	  }
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{table_name=projects/*/instances/*/**}"
+//	  }
+//	};
+//
+// result:
+//
+//	x-goog-request-params:
+//	table_name=projects/proj_foo/instances/instance_bar/table/table_baz
+//
+// # Example 4
+//
+// Extracting a single routing header key-value pair by matching a
+// template syntax on (a part of) a single request field.
+// +// annotation: +// +// option (google.api.routing) = { +// // Take just the project id from the `table_name` field. +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// }; +// +// result: +// +// x-goog-request-params: routing_id=projects/proj_foo +// +// # Example 5 +// +// Extracting a single routing header key-value pair by matching +// several conflictingly named path templates on (parts of) a single request +// field. The last template to match "wins" the conflict. +// +// annotation: +// +// option (google.api.routing) = { +// // If the `table_name` does not have instances information, +// // take just the project id for routing. +// // Otherwise take project + instance. +// +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*/instances/*}/**" +// } +// }; +// +// result: +// +// x-goog-request-params: +// routing_id=projects/proj_foo/instances/instance_bar +// +// # Example 6 +// +// Extracting multiple routing header key-value pairs by matching +// several non-conflicting path templates on (parts of) a single request field. +// +// # Sub-example 6a +// +// Make the templates strict, so that if the `table_name` does not +// have an instance information, nothing is sent. +// +// annotation: +// +// option (google.api.routing) = { +// // The routing code needs two keys instead of one composite +// // but works only for the tables with the "project-instance" name +// // syntax. +// +// routing_parameters { +// field: "table_name" +// path_template: "{project_id=projects/*}/instances/*/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "projects/*/{instance_id=instances/*}/**" +// } +// }; +// +// result: +// +// x-goog-request-params: +// project_id=projects/proj_foo&instance_id=instances/instance_bar +// +// # Sub-example 6b +// +// Make the templates loose, so that if the `table_name` does not +// have an instance information, just the project id part is sent. +// +// annotation: +// +// option (google.api.routing) = { +// // The routing code wants two keys instead of one composite +// // but will work with just the `project_id` for tables without +// // an instance in the `table_name`. +// +// routing_parameters { +// field: "table_name" +// path_template: "{project_id=projects/*}/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "projects/*/{instance_id=instances/*}/**" +// } +// }; +// +// result (is the same as 6a for our example message because it has the instance +// information): +// +// x-goog-request-params: +// project_id=projects/proj_foo&instance_id=instances/instance_bar +// +// # Example 7 +// +// Extracting multiple routing header key-value pairs by matching +// several path templates on multiple request fields. +// +// NB: note that here there is no way to specify sending nothing if one of the +// fields does not match its template. E.g. if the `table_name` is in the wrong +// format, the `project_id` will not be sent, but the `routing_id` will be. +// The backend routing code has to be aware of that and be prepared to not +// receive a full complement of keys if it expects multiple. +// +// annotation: +// +// option (google.api.routing) = { +// // The routing needs both `project_id` and `routing_id` +// // (from the `app_profile_id` field) for routing. 
+//
+//	routing_parameters {
+//	  field: "table_name"
+//	  path_template: "{project_id=projects/*}/**"
+//	}
+//	routing_parameters {
+//	  field: "app_profile_id"
+//	  path_template: "{routing_id=**}"
+//	}
+//	};
+//
+// result:
+//
+//	x-goog-request-params:
+//	project_id=projects/proj_foo&routing_id=profiles/prof_qux
+//
+// # Example 8
+//
+// Extracting a single routing header key-value pair by matching
+// several conflictingly named path templates on several request fields. The
+// last template to match "wins" the conflict.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // The `routing_id` can be a project id or a region id depending on
+//	  // the table name format, but only if the `app_profile_id` is not set.
+//	  // If `app_profile_id` is set it should be used instead.
+//
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{routing_id=projects/*}/**"
+//	  }
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{routing_id=regions/*}/**"
+//	  }
+//	  routing_parameters {
+//	    field: "app_profile_id"
+//	    path_template: "{routing_id=**}"
+//	  }
+//	};
+//
+// result:
+//
+//	x-goog-request-params: routing_id=profiles/prof_qux
+//
+// # Example 9
+//
+// Bringing it all together.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // For routing, both `table_location` and a `routing_id` are needed.
+//	  //
+//	  // table_location can be either an instance id or a region+zone id.
+//	  //
+//	  // For `routing_id`, take the value of `app_profile_id`
+//	  // - If it's in the format `profiles/<profile_id>`, send
+//	  //   just the `<profile_id>` part.
+//	  // - If it's any other literal, send it as is.
+//	  // If the `app_profile_id` is empty, and the `table_name` starts with
+//	  // the project_id, send that instead.
+//
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "projects/*/{table_location=instances/*}/tables/*"
+//	  }
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{table_location=regions/*/zones/*}/tables/*"
+//	  }
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{routing_id=projects/*}/**"
+//	  }
+//	  routing_parameters {
+//	    field: "app_profile_id"
+//	    path_template: "{routing_id=**}"
+//	  }
+//	  routing_parameters {
+//	    field: "app_profile_id"
+//	    path_template: "profiles/{routing_id=*}"
+//	  }
+//	};
+//
+// result:
+//
+//	x-goog-request-params:
+//	table_location=instances/instance_bar&routing_id=prof_qux
+type RoutingRule struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A collection of Routing Parameter specifications.
+	// **NOTE:** If multiple Routing Parameters describe the same key
+	// (via the `path_template` field or via the `field` field when
+	// `path_template` is not provided), the "last one wins" rule
+	// determines which Parameter gets used.
+	// See the examples for more details.
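+	//
+	// For instance (an illustrative reading of Example 8 above): when two
+	// entries both produce the key `routing_id` and both of their path
+	// templates match the request, the value from the entry that appears
+	// later in this list is the one sent in the x-goog-request-params header.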
+ RoutingParameters []*RoutingParameter `protobuf:"bytes,2,rep,name=routing_parameters,json=routingParameters,proto3" json:"routing_parameters,omitempty"` +} + +func (x *RoutingRule) Reset() { + *x = RoutingRule{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_routing_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RoutingRule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoutingRule) ProtoMessage() {} + +func (x *RoutingRule) ProtoReflect() protoreflect.Message { + mi := &file_google_api_routing_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RoutingRule.ProtoReflect.Descriptor instead. +func (*RoutingRule) Descriptor() ([]byte, []int) { + return file_google_api_routing_proto_rawDescGZIP(), []int{0} +} + +func (x *RoutingRule) GetRoutingParameters() []*RoutingParameter { + if x != nil { + return x.RoutingParameters + } + return nil +} + +// A projection from an input message to the GRPC or REST header. +type RoutingParameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A request field to extract the header key-value pair from. + Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` + // A pattern matching the key-value field. Optional. + // If not specified, the whole field specified in the `field` field will be + // taken as value, and its name used as key. If specified, it MUST contain + // exactly one named segment (along with any number of unnamed segments) The + // pattern will be matched over the field specified in the `field` field, then + // if the match is successful: + // - the name of the single named segment will be used as a header name, + // - the match value of the segment will be used as a header value; + // if the match is NOT successful, nothing will be sent. + // + // Example: + // + // -- This is a field in the request message + // | that the header value will be extracted from. + // | + // | -- This is the key name in the + // | | routing header. + // V | + // field: "table_name" v + // path_template: "projects/*/{table_location=instances/*}/tables/*" + // ^ ^ + // | | + // In the {} brackets is the pattern that -- | + // specifies what to extract from the | + // field as a value to be sent. | + // | + // The string in the field must match the whole pattern -- + // before brackets, inside brackets, after brackets. + // + // When looking at this specific example, we can see that: + // - A key-value pair with the key `table_location` + // and the value matching `instances/*` should be added + // to the x-goog-request-params routing header. + // - The value is extracted from the request message's `table_name` field + // if it matches the full pattern specified: + // `projects/*/instances/*/tables/*`. + // + // **NB:** If the `path_template` field is not provided, the key name is + // equal to the field name, and the whole field should be sent as a value. 
+ // This makes the pattern for the field and the value functionally equivalent + // to `**`, and the configuration + // + // { + // field: "table_name" + // } + // + // is a functionally equivalent shorthand to: + // + // { + // field: "table_name" + // path_template: "{table_name=**}" + // } + // + // See Example 1 for more details. + PathTemplate string `protobuf:"bytes,2,opt,name=path_template,json=pathTemplate,proto3" json:"path_template,omitempty"` +} + +func (x *RoutingParameter) Reset() { + *x = RoutingParameter{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_routing_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RoutingParameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoutingParameter) ProtoMessage() {} + +func (x *RoutingParameter) ProtoReflect() protoreflect.Message { + mi := &file_google_api_routing_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RoutingParameter.ProtoReflect.Descriptor instead. +func (*RoutingParameter) Descriptor() ([]byte, []int) { + return file_google_api_routing_proto_rawDescGZIP(), []int{1} +} + +func (x *RoutingParameter) GetField() string { + if x != nil { + return x.Field + } + return "" +} + +func (x *RoutingParameter) GetPathTemplate() string { + if x != nil { + return x.PathTemplate + } + return "" +} + +var file_google_api_routing_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*RoutingRule)(nil), + Field: 72295729, + Name: "google.api.routing", + Tag: "bytes,72295729,opt,name=routing", + Filename: "google/api/routing.proto", + }, +} + +// Extension fields to descriptorpb.MethodOptions. +var ( + // See RoutingRule. 
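+	//
+	// Illustratively (hypothetical RPC and message names; the field name
+	// follows the RoutingRule examples above), a service attaches it as a
+	// method option in its .proto definition:
+	//
+	//	rpc GetTable(GetTableRequest) returns (Table) {
+	//	  option (google.api.routing) = {
+	//	    routing_parameters { field: "table_name" }
+	//	  };
+	//	}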
+ // + // optional google.api.RoutingRule routing = 72295729; + E_Routing = &file_google_api_routing_proto_extTypes[0] +) + +var File_google_api_routing_proto protoreflect.FileDescriptor + +var file_google_api_routing_proto_rawDesc = []byte{ + 0x0a, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x5a, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x4b, 0x0a, 0x12, 0x72, 0x6f, 0x75, 0x74, 0x69, + 0x6e, 0x67, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x52, 0x11, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x22, 0x4d, 0x0a, 0x10, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x23, + 0x0a, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x54, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x3a, 0x54, 0x0a, 0x07, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb1, + 0xca, 0xbc, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, + 0x52, 0x07, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x42, 0x6a, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0c, 0x52, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, + 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, + 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_routing_proto_rawDescOnce sync.Once + file_google_api_routing_proto_rawDescData = file_google_api_routing_proto_rawDesc +) + +func file_google_api_routing_proto_rawDescGZIP() []byte { + file_google_api_routing_proto_rawDescOnce.Do(func() { + file_google_api_routing_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_routing_proto_rawDescData) + }) + return file_google_api_routing_proto_rawDescData +} + +var file_google_api_routing_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_google_api_routing_proto_goTypes = []interface{}{ + 
(*RoutingRule)(nil), // 0: google.api.RoutingRule + (*RoutingParameter)(nil), // 1: google.api.RoutingParameter + (*descriptorpb.MethodOptions)(nil), // 2: google.protobuf.MethodOptions +} +var file_google_api_routing_proto_depIdxs = []int32{ + 1, // 0: google.api.RoutingRule.routing_parameters:type_name -> google.api.RoutingParameter + 2, // 1: google.api.routing:extendee -> google.protobuf.MethodOptions + 0, // 2: google.api.routing:type_name -> google.api.RoutingRule + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 2, // [2:3] is the sub-list for extension type_name + 1, // [1:2] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_api_routing_proto_init() } +func file_google_api_routing_proto_init() { + if File_google_api_routing_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_routing_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RoutingRule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_routing_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RoutingParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_routing_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_routing_proto_goTypes, + DependencyIndexes: file_google_api_routing_proto_depIdxs, + MessageInfos: file_google_api_routing_proto_msgTypes, + ExtensionInfos: file_google_api_routing_proto_extTypes, + }.Build() + File_google_api_routing_proto = out.File + file_google_api_routing_proto_rawDesc = nil + file_google_api_routing_proto_goTypes = nil + file_google_api_routing_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go new file mode 100644 index 00000000000..71075313773 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go @@ -0,0 +1,203 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.18.1 +// source: google/api/launch_stage.proto + +package api + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The launch stage as defined by [Google Cloud Platform +// Launch Stages](https://cloud.google.com/terms/launch-stages). +type LaunchStage int32 + +const ( + // Do not use this default value. + LaunchStage_LAUNCH_STAGE_UNSPECIFIED LaunchStage = 0 + // The feature is not yet implemented. Users can not use it. + LaunchStage_UNIMPLEMENTED LaunchStage = 6 + // Prelaunch features are hidden from users and are only visible internally. + LaunchStage_PRELAUNCH LaunchStage = 7 + // Early Access features are limited to a closed group of testers. To use + // these features, you must sign up in advance and sign a Trusted Tester + // agreement (which includes confidentiality provisions). These features may + // be unstable, changed in backward-incompatible ways, and are not + // guaranteed to be released. + LaunchStage_EARLY_ACCESS LaunchStage = 1 + // Alpha is a limited availability test for releases before they are cleared + // for widespread use. By Alpha, all significant design issues are resolved + // and we are in the process of verifying functionality. Alpha customers + // need to apply for access, agree to applicable terms, and have their + // projects allowlisted. Alpha releases don't have to be feature complete, + // no SLAs are provided, and there are no technical support obligations, but + // they will be far enough along that customers can actually use them in + // test environments or for limited-use tests -- just like they would in + // normal production cases. + LaunchStage_ALPHA LaunchStage = 2 + // Beta is the point at which we are ready to open a release for any + // customer to use. There are no SLA or technical support obligations in a + // Beta release. Products will be complete from a feature perspective, but + // may have some open outstanding issues. Beta releases are suitable for + // limited production use cases. + LaunchStage_BETA LaunchStage = 3 + // GA features are open to all developers and are considered stable and + // fully qualified for production use. + LaunchStage_GA LaunchStage = 4 + // Deprecated features are scheduled to be shut down and removed. For more + // information, see the "Deprecation Policy" section of our [Terms of + // Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation + // Policy](https://cloud.google.com/terms/deprecation) documentation. + LaunchStage_DEPRECATED LaunchStage = 5 +) + +// Enum value maps for LaunchStage. 
+var ( + LaunchStage_name = map[int32]string{ + 0: "LAUNCH_STAGE_UNSPECIFIED", + 6: "UNIMPLEMENTED", + 7: "PRELAUNCH", + 1: "EARLY_ACCESS", + 2: "ALPHA", + 3: "BETA", + 4: "GA", + 5: "DEPRECATED", + } + LaunchStage_value = map[string]int32{ + "LAUNCH_STAGE_UNSPECIFIED": 0, + "UNIMPLEMENTED": 6, + "PRELAUNCH": 7, + "EARLY_ACCESS": 1, + "ALPHA": 2, + "BETA": 3, + "GA": 4, + "DEPRECATED": 5, + } +) + +func (x LaunchStage) Enum() *LaunchStage { + p := new(LaunchStage) + *p = x + return p +} + +func (x LaunchStage) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LaunchStage) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_launch_stage_proto_enumTypes[0].Descriptor() +} + +func (LaunchStage) Type() protoreflect.EnumType { + return &file_google_api_launch_stage_proto_enumTypes[0] +} + +func (x LaunchStage) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LaunchStage.Descriptor instead. +func (LaunchStage) EnumDescriptor() ([]byte, []int) { + return file_google_api_launch_stage_proto_rawDescGZIP(), []int{0} +} + +var File_google_api_launch_stage_proto protoreflect.FileDescriptor + +var file_google_api_launch_stage_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, + 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2a, 0x8c, 0x01, 0x0a, 0x0b, + 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x4c, + 0x41, 0x55, 0x4e, 0x43, 0x48, 0x5f, 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x49, + 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, + 0x50, 0x52, 0x45, 0x4c, 0x41, 0x55, 0x4e, 0x43, 0x48, 0x10, 0x07, 0x12, 0x10, 0x0a, 0x0c, 0x45, + 0x41, 0x52, 0x4c, 0x59, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x09, 0x0a, + 0x05, 0x41, 0x4c, 0x50, 0x48, 0x41, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x45, 0x54, 0x41, + 0x10, 0x03, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x41, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, + 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x05, 0x42, 0x5a, 0x0a, 0x0e, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x10, 0x4c, 0x61, + 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x61, 0x70, 0x69, 0xa2, + 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_launch_stage_proto_rawDescOnce sync.Once + file_google_api_launch_stage_proto_rawDescData = file_google_api_launch_stage_proto_rawDesc +) + +func file_google_api_launch_stage_proto_rawDescGZIP() []byte { + file_google_api_launch_stage_proto_rawDescOnce.Do(func() { + file_google_api_launch_stage_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_launch_stage_proto_rawDescData) + }) + return file_google_api_launch_stage_proto_rawDescData +} + +var 
file_google_api_launch_stage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_launch_stage_proto_goTypes = []interface{}{ + (LaunchStage)(0), // 0: google.api.LaunchStage +} +var file_google_api_launch_stage_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_api_launch_stage_proto_init() } +func file_google_api_launch_stage_proto_init() { + if File_google_api_launch_stage_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_launch_stage_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_launch_stage_proto_goTypes, + DependencyIndexes: file_google_api_launch_stage_proto_depIdxs, + EnumInfos: file_google_api_launch_stage_proto_enumTypes, + }.Build() + File_google_api_launch_stage_proto = out.File + file_google_api_launch_stage_proto_rawDesc = nil + file_google_api_launch_stage_proto_goTypes = nil + file_google_api_launch_stage_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/kms/v1/alias.go b/vendor/google.golang.org/genproto/googleapis/cloud/kms/v1/alias.go new file mode 100644 index 00000000000..d788eb0ccca --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/kms/v1/alias.go @@ -0,0 +1,652 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by aliasgen. DO NOT EDIT. + +// Package kms aliases all exported identifiers in package +// "cloud.google.com/go/kms/apiv1/kmspb". +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb. +// Please read https://github.com/googleapis/google-cloud-go/blob/main/migration.md +// for more details. 
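+//
+// Every declaration below is a thin forwarder: the consts and type aliases
+// re-export identifiers from kmspb unchanged, and the New*Client and
+// Register*Server funcs delegate directly, so code that still imports this
+// package keeps compiling against the canonical kmspb definitions.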
+package kms + +import ( + src "cloud.google.com/go/kms/apiv1/kmspb" + grpc "google.golang.org/grpc" +) + +// Deprecated: Please use consts in: cloud.google.com/go/kms/apiv1/kmspb +const ( + CryptoKeyVersion_CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED = src.CryptoKeyVersion_CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED + CryptoKeyVersion_CRYPTO_KEY_VERSION_STATE_UNSPECIFIED = src.CryptoKeyVersion_CRYPTO_KEY_VERSION_STATE_UNSPECIFIED + CryptoKeyVersion_CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED = src.CryptoKeyVersion_CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED + CryptoKeyVersion_DESTROYED = src.CryptoKeyVersion_DESTROYED + CryptoKeyVersion_DESTROY_SCHEDULED = src.CryptoKeyVersion_DESTROY_SCHEDULED + CryptoKeyVersion_DISABLED = src.CryptoKeyVersion_DISABLED + CryptoKeyVersion_EC_SIGN_P256_SHA256 = src.CryptoKeyVersion_EC_SIGN_P256_SHA256 + CryptoKeyVersion_EC_SIGN_P384_SHA384 = src.CryptoKeyVersion_EC_SIGN_P384_SHA384 + CryptoKeyVersion_EC_SIGN_SECP256K1_SHA256 = src.CryptoKeyVersion_EC_SIGN_SECP256K1_SHA256 + CryptoKeyVersion_ENABLED = src.CryptoKeyVersion_ENABLED + CryptoKeyVersion_EXTERNAL_SYMMETRIC_ENCRYPTION = src.CryptoKeyVersion_EXTERNAL_SYMMETRIC_ENCRYPTION + CryptoKeyVersion_FULL = src.CryptoKeyVersion_FULL + CryptoKeyVersion_GOOGLE_SYMMETRIC_ENCRYPTION = src.CryptoKeyVersion_GOOGLE_SYMMETRIC_ENCRYPTION + CryptoKeyVersion_HMAC_SHA256 = src.CryptoKeyVersion_HMAC_SHA256 + CryptoKeyVersion_IMPORT_FAILED = src.CryptoKeyVersion_IMPORT_FAILED + CryptoKeyVersion_PENDING_GENERATION = src.CryptoKeyVersion_PENDING_GENERATION + CryptoKeyVersion_PENDING_IMPORT = src.CryptoKeyVersion_PENDING_IMPORT + CryptoKeyVersion_RSA_DECRYPT_OAEP_2048_SHA1 = src.CryptoKeyVersion_RSA_DECRYPT_OAEP_2048_SHA1 + CryptoKeyVersion_RSA_DECRYPT_OAEP_2048_SHA256 = src.CryptoKeyVersion_RSA_DECRYPT_OAEP_2048_SHA256 + CryptoKeyVersion_RSA_DECRYPT_OAEP_3072_SHA1 = src.CryptoKeyVersion_RSA_DECRYPT_OAEP_3072_SHA1 + CryptoKeyVersion_RSA_DECRYPT_OAEP_3072_SHA256 = src.CryptoKeyVersion_RSA_DECRYPT_OAEP_3072_SHA256 + CryptoKeyVersion_RSA_DECRYPT_OAEP_4096_SHA1 = src.CryptoKeyVersion_RSA_DECRYPT_OAEP_4096_SHA1 + CryptoKeyVersion_RSA_DECRYPT_OAEP_4096_SHA256 = src.CryptoKeyVersion_RSA_DECRYPT_OAEP_4096_SHA256 + CryptoKeyVersion_RSA_DECRYPT_OAEP_4096_SHA512 = src.CryptoKeyVersion_RSA_DECRYPT_OAEP_4096_SHA512 + CryptoKeyVersion_RSA_SIGN_PKCS1_2048_SHA256 = src.CryptoKeyVersion_RSA_SIGN_PKCS1_2048_SHA256 + CryptoKeyVersion_RSA_SIGN_PKCS1_3072_SHA256 = src.CryptoKeyVersion_RSA_SIGN_PKCS1_3072_SHA256 + CryptoKeyVersion_RSA_SIGN_PKCS1_4096_SHA256 = src.CryptoKeyVersion_RSA_SIGN_PKCS1_4096_SHA256 + CryptoKeyVersion_RSA_SIGN_PKCS1_4096_SHA512 = src.CryptoKeyVersion_RSA_SIGN_PKCS1_4096_SHA512 + CryptoKeyVersion_RSA_SIGN_PSS_2048_SHA256 = src.CryptoKeyVersion_RSA_SIGN_PSS_2048_SHA256 + CryptoKeyVersion_RSA_SIGN_PSS_3072_SHA256 = src.CryptoKeyVersion_RSA_SIGN_PSS_3072_SHA256 + CryptoKeyVersion_RSA_SIGN_PSS_4096_SHA256 = src.CryptoKeyVersion_RSA_SIGN_PSS_4096_SHA256 + CryptoKeyVersion_RSA_SIGN_PSS_4096_SHA512 = src.CryptoKeyVersion_RSA_SIGN_PSS_4096_SHA512 + CryptoKeyVersion_RSA_SIGN_RAW_PKCS1_2048 = src.CryptoKeyVersion_RSA_SIGN_RAW_PKCS1_2048 + CryptoKeyVersion_RSA_SIGN_RAW_PKCS1_3072 = src.CryptoKeyVersion_RSA_SIGN_RAW_PKCS1_3072 + CryptoKeyVersion_RSA_SIGN_RAW_PKCS1_4096 = src.CryptoKeyVersion_RSA_SIGN_RAW_PKCS1_4096 + CryptoKey_ASYMMETRIC_DECRYPT = src.CryptoKey_ASYMMETRIC_DECRYPT + CryptoKey_ASYMMETRIC_SIGN = src.CryptoKey_ASYMMETRIC_SIGN + CryptoKey_CRYPTO_KEY_PURPOSE_UNSPECIFIED = src.CryptoKey_CRYPTO_KEY_PURPOSE_UNSPECIFIED + CryptoKey_ENCRYPT_DECRYPT = 
src.CryptoKey_ENCRYPT_DECRYPT + CryptoKey_MAC = src.CryptoKey_MAC + ImportJob_ACTIVE = src.ImportJob_ACTIVE + ImportJob_EXPIRED = src.ImportJob_EXPIRED + ImportJob_IMPORT_JOB_STATE_UNSPECIFIED = src.ImportJob_IMPORT_JOB_STATE_UNSPECIFIED + ImportJob_IMPORT_METHOD_UNSPECIFIED = src.ImportJob_IMPORT_METHOD_UNSPECIFIED + ImportJob_PENDING_GENERATION = src.ImportJob_PENDING_GENERATION + ImportJob_RSA_OAEP_3072_SHA1_AES_256 = src.ImportJob_RSA_OAEP_3072_SHA1_AES_256 + ImportJob_RSA_OAEP_4096_SHA1_AES_256 = src.ImportJob_RSA_OAEP_4096_SHA1_AES_256 + KeyOperationAttestation_ATTESTATION_FORMAT_UNSPECIFIED = src.KeyOperationAttestation_ATTESTATION_FORMAT_UNSPECIFIED + KeyOperationAttestation_CAVIUM_V1_COMPRESSED = src.KeyOperationAttestation_CAVIUM_V1_COMPRESSED + KeyOperationAttestation_CAVIUM_V2_COMPRESSED = src.KeyOperationAttestation_CAVIUM_V2_COMPRESSED + ProtectionLevel_EXTERNAL = src.ProtectionLevel_EXTERNAL + ProtectionLevel_EXTERNAL_VPC = src.ProtectionLevel_EXTERNAL_VPC + ProtectionLevel_HSM = src.ProtectionLevel_HSM + ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED = src.ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED + ProtectionLevel_SOFTWARE = src.ProtectionLevel_SOFTWARE +) + +// Deprecated: Please use vars in: cloud.google.com/go/kms/apiv1/kmspb +var ( + CryptoKeyVersion_CryptoKeyVersionAlgorithm_name = src.CryptoKeyVersion_CryptoKeyVersionAlgorithm_name + CryptoKeyVersion_CryptoKeyVersionAlgorithm_value = src.CryptoKeyVersion_CryptoKeyVersionAlgorithm_value + CryptoKeyVersion_CryptoKeyVersionState_name = src.CryptoKeyVersion_CryptoKeyVersionState_name + CryptoKeyVersion_CryptoKeyVersionState_value = src.CryptoKeyVersion_CryptoKeyVersionState_value + CryptoKeyVersion_CryptoKeyVersionView_name = src.CryptoKeyVersion_CryptoKeyVersionView_name + CryptoKeyVersion_CryptoKeyVersionView_value = src.CryptoKeyVersion_CryptoKeyVersionView_value + CryptoKey_CryptoKeyPurpose_name = src.CryptoKey_CryptoKeyPurpose_name + CryptoKey_CryptoKeyPurpose_value = src.CryptoKey_CryptoKeyPurpose_value + File_google_cloud_kms_v1_ekm_service_proto = src.File_google_cloud_kms_v1_ekm_service_proto + File_google_cloud_kms_v1_resources_proto = src.File_google_cloud_kms_v1_resources_proto + File_google_cloud_kms_v1_service_proto = src.File_google_cloud_kms_v1_service_proto + ImportJob_ImportJobState_name = src.ImportJob_ImportJobState_name + ImportJob_ImportJobState_value = src.ImportJob_ImportJobState_value + ImportJob_ImportMethod_name = src.ImportJob_ImportMethod_name + ImportJob_ImportMethod_value = src.ImportJob_ImportMethod_value + KeyOperationAttestation_AttestationFormat_name = src.KeyOperationAttestation_AttestationFormat_name + KeyOperationAttestation_AttestationFormat_value = src.KeyOperationAttestation_AttestationFormat_value + ProtectionLevel_name = src.ProtectionLevel_name + ProtectionLevel_value = src.ProtectionLevel_value +) + +// Request message for +// [KeyManagementService.AsymmetricDecrypt][google.cloud.kms.v1.KeyManagementService.AsymmetricDecrypt]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type AsymmetricDecryptRequest = src.AsymmetricDecryptRequest + +// Response message for +// [KeyManagementService.AsymmetricDecrypt][google.cloud.kms.v1.KeyManagementService.AsymmetricDecrypt]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type AsymmetricDecryptResponse = src.AsymmetricDecryptResponse + +// Request message for +// [KeyManagementService.AsymmetricSign][google.cloud.kms.v1.KeyManagementService.AsymmetricSign]. 
+// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type AsymmetricSignRequest = src.AsymmetricSignRequest + +// Response message for +// [KeyManagementService.AsymmetricSign][google.cloud.kms.v1.KeyManagementService.AsymmetricSign]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type AsymmetricSignResponse = src.AsymmetricSignResponse + +// A [Certificate][google.cloud.kms.v1.Certificate] represents an X.509 +// certificate used to authenticate HTTPS connections to EKM replicas. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type Certificate = src.Certificate + +// Request message for +// [KeyManagementService.CreateCryptoKey][google.cloud.kms.v1.KeyManagementService.CreateCryptoKey]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type CreateCryptoKeyRequest = src.CreateCryptoKeyRequest + +// Request message for +// [KeyManagementService.CreateCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type CreateCryptoKeyVersionRequest = src.CreateCryptoKeyVersionRequest + +// Request message for [KeyManagementService.CreateEkmConnection][]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type CreateEkmConnectionRequest = src.CreateEkmConnectionRequest + +// Request message for +// [KeyManagementService.CreateImportJob][google.cloud.kms.v1.KeyManagementService.CreateImportJob]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type CreateImportJobRequest = src.CreateImportJobRequest + +// Request message for +// [KeyManagementService.CreateKeyRing][google.cloud.kms.v1.KeyManagementService.CreateKeyRing]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type CreateKeyRingRequest = src.CreateKeyRingRequest + +// A [CryptoKey][google.cloud.kms.v1.CryptoKey] represents a logical key that +// can be used for cryptographic operations. A +// [CryptoKey][google.cloud.kms.v1.CryptoKey] is made up of zero or more +// [versions][google.cloud.kms.v1.CryptoKeyVersion], which represent the actual +// key material used in cryptographic operations. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type CryptoKey = src.CryptoKey + +// A [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] represents an +// individual cryptographic key, and the associated key material. An +// [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED] +// version can be used for cryptographic operations. For security reasons, the +// raw cryptographic key material represented by a +// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] can never be viewed +// or exported. It can only be used to encrypt, decrypt, or sign data when an +// authorized user or application invokes Cloud KMS. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type CryptoKeyVersion = src.CryptoKeyVersion + +// A [CryptoKeyVersionTemplate][google.cloud.kms.v1.CryptoKeyVersionTemplate] +// specifies the properties to use when creating a new +// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion], either manually +// with +// [CreateCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion] +// or automatically as a result of auto-rotation. 
+// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type CryptoKeyVersionTemplate = src.CryptoKeyVersionTemplate + +// The algorithm of the +// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion], indicating what +// parameters must be used for each cryptographic operation. The +// [GOOGLE_SYMMETRIC_ENCRYPTION][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm.GOOGLE_SYMMETRIC_ENCRYPTION] +// algorithm is usable with +// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] +// [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. +// Algorithms beginning with "RSA_SIGN_" are usable with +// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] +// [ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN]. +// The fields in the name after "RSA_SIGN_" correspond to the following +// parameters: padding algorithm, modulus bit length, and digest algorithm. For +// PSS, the salt length used is equal to the length of digest algorithm. For +// example, +// [RSA_SIGN_PSS_2048_SHA256][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm.RSA_SIGN_PSS_2048_SHA256] +// will use PSS with a salt length of 256 bits or 32 bytes. Algorithms +// beginning with "RSA_DECRYPT_" are usable with +// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] +// [ASYMMETRIC_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_DECRYPT]. +// The fields in the name after "RSA_DECRYPT_" correspond to the following +// parameters: padding algorithm, modulus bit length, and digest algorithm. +// Algorithms beginning with "EC_SIGN_" are usable with +// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] +// [ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN]. +// The fields in the name after "EC_SIGN_" correspond to the following +// parameters: elliptic curve, digest algorithm. Algorithms beginning with +// "HMAC_" are usable with +// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] +// [MAC][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.MAC]. The suffix +// following "HMAC_" corresponds to the hash algorithm being used (eg. SHA256). +// For more information, see [Key purposes and algorithms] +// (https://cloud.google.com/kms/docs/algorithms). +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type CryptoKeyVersion_CryptoKeyVersionAlgorithm = src.CryptoKeyVersion_CryptoKeyVersionAlgorithm + +// The state of a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion], +// indicating if it can be used. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type CryptoKeyVersion_CryptoKeyVersionState = src.CryptoKeyVersion_CryptoKeyVersionState + +// A view for [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]s. +// Controls the level of detail returned for +// [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] in +// [KeyManagementService.ListCryptoKeyVersions][google.cloud.kms.v1.KeyManagementService.ListCryptoKeyVersions] +// and +// [KeyManagementService.ListCryptoKeys][google.cloud.kms.v1.KeyManagementService.ListCryptoKeys]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type CryptoKeyVersion_CryptoKeyVersionView = src.CryptoKeyVersion_CryptoKeyVersionView + +// [CryptoKeyPurpose][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose] +// describes the cryptographic capabilities of a +// [CryptoKey][google.cloud.kms.v1.CryptoKey]. 
A given key can only be used for +// the operations allowed by its purpose. For more information, see [Key +// purposes](https://cloud.google.com/kms/docs/algorithms#key_purposes). +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type CryptoKey_CryptoKeyPurpose = src.CryptoKey_CryptoKeyPurpose +type CryptoKey_RotationPeriod = src.CryptoKey_RotationPeriod + +// Request message for +// [KeyManagementService.Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type DecryptRequest = src.DecryptRequest + +// Response message for +// [KeyManagementService.Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type DecryptResponse = src.DecryptResponse + +// Request message for +// [KeyManagementService.DestroyCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type DestroyCryptoKeyVersionRequest = src.DestroyCryptoKeyVersionRequest + +// A [Digest][google.cloud.kms.v1.Digest] holds a cryptographic message +// digest. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type Digest = src.Digest +type Digest_Sha256 = src.Digest_Sha256 +type Digest_Sha384 = src.Digest_Sha384 +type Digest_Sha512 = src.Digest_Sha512 + +// An [EkmConnection][google.cloud.kms.v1.EkmConnection] represents an +// individual EKM connection. It can be used for creating +// [CryptoKeys][google.cloud.kms.v1.CryptoKey] and +// [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] with a +// [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of +// [EXTERNAL_VPC][CryptoKeyVersion.ProtectionLevel.EXTERNAL_VPC], as well as +// performing cryptographic operations using keys created within the +// [EkmConnection][google.cloud.kms.v1.EkmConnection]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type EkmConnection = src.EkmConnection + +// A [ServiceResolver][google.cloud.kms.v1.EkmConnection.ServiceResolver] +// represents an EKM replica that can be reached within an +// [EkmConnection][google.cloud.kms.v1.EkmConnection]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type EkmConnection_ServiceResolver = src.EkmConnection_ServiceResolver + +// EkmServiceClient is the client API for EkmService service. For semantics +// around ctx use and closing/ending streaming RPCs, please refer to +// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type EkmServiceClient = src.EkmServiceClient + +// EkmServiceServer is the server API for EkmService service. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type EkmServiceServer = src.EkmServiceServer + +// Request message for +// [KeyManagementService.Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type EncryptRequest = src.EncryptRequest + +// Response message for +// [KeyManagementService.Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. 
+// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type EncryptResponse = src.EncryptResponse + +// ExternalProtectionLevelOptions stores a group of additional fields for +// configuring a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] that +// are specific to the [EXTERNAL][google.cloud.kms.v1.ProtectionLevel.EXTERNAL] +// protection level and +// [EXTERNAL_VPC][google.cloud.kms.v1.ProtectionLevel.EXTERNAL_VPC] protection +// levels. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type ExternalProtectionLevelOptions = src.ExternalProtectionLevelOptions + +// Request message for +// [KeyManagementService.GenerateRandomBytes][google.cloud.kms.v1.KeyManagementService.GenerateRandomBytes]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type GenerateRandomBytesRequest = src.GenerateRandomBytesRequest + +// Response message for +// [KeyManagementService.GenerateRandomBytes][google.cloud.kms.v1.KeyManagementService.GenerateRandomBytes]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type GenerateRandomBytesResponse = src.GenerateRandomBytesResponse + +// Request message for +// [KeyManagementService.GetCryptoKey][google.cloud.kms.v1.KeyManagementService.GetCryptoKey]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type GetCryptoKeyRequest = src.GetCryptoKeyRequest + +// Request message for +// [KeyManagementService.GetCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.GetCryptoKeyVersion]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type GetCryptoKeyVersionRequest = src.GetCryptoKeyVersionRequest + +// Request message for [KeyManagementService.GetEkmConnection][]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type GetEkmConnectionRequest = src.GetEkmConnectionRequest + +// Request message for +// [KeyManagementService.GetImportJob][google.cloud.kms.v1.KeyManagementService.GetImportJob]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type GetImportJobRequest = src.GetImportJobRequest + +// Request message for +// [KeyManagementService.GetKeyRing][google.cloud.kms.v1.KeyManagementService.GetKeyRing]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type GetKeyRingRequest = src.GetKeyRingRequest + +// Request message for +// [KeyManagementService.GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type GetPublicKeyRequest = src.GetPublicKeyRequest + +// Request message for +// [KeyManagementService.ImportCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.ImportCryptoKeyVersion]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type ImportCryptoKeyVersionRequest = src.ImportCryptoKeyVersionRequest +type ImportCryptoKeyVersionRequest_RsaAesWrappedKey = src.ImportCryptoKeyVersionRequest_RsaAesWrappedKey + +// An [ImportJob][google.cloud.kms.v1.ImportJob] can be used to create +// [CryptoKeys][google.cloud.kms.v1.CryptoKey] and +// [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] using pre-existing +// key material, generated outside of Cloud KMS. When an +// [ImportJob][google.cloud.kms.v1.ImportJob] is created, Cloud KMS will +// generate a "wrapping key", which is a public/private key pair. 
You use the +// wrapping key to encrypt (also known as wrap) the pre-existing key material +// to protect it during the import process. The nature of the wrapping key +// depends on the choice of +// [import_method][google.cloud.kms.v1.ImportJob.import_method]. When the +// wrapping key generation is complete, the +// [state][google.cloud.kms.v1.ImportJob.state] will be set to +// [ACTIVE][google.cloud.kms.v1.ImportJob.ImportJobState.ACTIVE] and the +// [public_key][google.cloud.kms.v1.ImportJob.public_key] can be fetched. The +// fetched public key can then be used to wrap your pre-existing key material. +// Once the key material is wrapped, it can be imported into a new +// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in an existing +// [CryptoKey][google.cloud.kms.v1.CryptoKey] by calling +// [ImportCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.ImportCryptoKeyVersion]. +// Multiple [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] can be +// imported with a single [ImportJob][google.cloud.kms.v1.ImportJob]. Cloud KMS +// uses the private key portion of the wrapping key to unwrap the key material. +// Only Cloud KMS has access to the private key. An +// [ImportJob][google.cloud.kms.v1.ImportJob] expires 3 days after it is +// created. Once expired, Cloud KMS will no longer be able to import or unwrap +// any key material that was wrapped with the +// [ImportJob][google.cloud.kms.v1.ImportJob]'s public key. For more +// information, see [Importing a +// key](https://cloud.google.com/kms/docs/importing-a-key). +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type ImportJob = src.ImportJob + +// The state of the [ImportJob][google.cloud.kms.v1.ImportJob], indicating if +// it can be used. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type ImportJob_ImportJobState = src.ImportJob_ImportJobState + +// [ImportMethod][google.cloud.kms.v1.ImportJob.ImportMethod] describes the +// key wrapping method chosen for this +// [ImportJob][google.cloud.kms.v1.ImportJob]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type ImportJob_ImportMethod = src.ImportJob_ImportMethod + +// The public key component of the wrapping key. For details of the type of +// key this public key corresponds to, see the +// [ImportMethod][google.cloud.kms.v1.ImportJob.ImportMethod]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type ImportJob_WrappingPublicKey = src.ImportJob_WrappingPublicKey + +// KeyManagementServiceClient is the client API for KeyManagementService +// service. For semantics around ctx use and closing/ending streaming RPCs, +// please refer to +// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type KeyManagementServiceClient = src.KeyManagementServiceClient + +// KeyManagementServiceServer is the server API for KeyManagementService +// service. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type KeyManagementServiceServer = src.KeyManagementServiceServer + +// Contains an HSM-generated attestation about a key operation. For more +// information, see [Verifying attestations] +// (https://cloud.google.com/kms/docs/attest-key). +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type KeyOperationAttestation = src.KeyOperationAttestation + +// Attestation formats provided by the HSM. 
+// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type KeyOperationAttestation_AttestationFormat = src.KeyOperationAttestation_AttestationFormat + +// Certificate chains needed to verify the attestation. Certificates in chains +// are PEM-encoded and are ordered based on +// https://tools.ietf.org/html/rfc5246#section-7.4.2. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type KeyOperationAttestation_CertificateChains = src.KeyOperationAttestation_CertificateChains + +// A [KeyRing][google.cloud.kms.v1.KeyRing] is a toplevel logical grouping of +// [CryptoKeys][google.cloud.kms.v1.CryptoKey]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type KeyRing = src.KeyRing + +// Request message for +// [KeyManagementService.ListCryptoKeyVersions][google.cloud.kms.v1.KeyManagementService.ListCryptoKeyVersions]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type ListCryptoKeyVersionsRequest = src.ListCryptoKeyVersionsRequest + +// Response message for +// [KeyManagementService.ListCryptoKeyVersions][google.cloud.kms.v1.KeyManagementService.ListCryptoKeyVersions]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type ListCryptoKeyVersionsResponse = src.ListCryptoKeyVersionsResponse + +// Request message for +// [KeyManagementService.ListCryptoKeys][google.cloud.kms.v1.KeyManagementService.ListCryptoKeys]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type ListCryptoKeysRequest = src.ListCryptoKeysRequest + +// Response message for +// [KeyManagementService.ListCryptoKeys][google.cloud.kms.v1.KeyManagementService.ListCryptoKeys]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type ListCryptoKeysResponse = src.ListCryptoKeysResponse + +// Request message for [KeyManagementService.ListEkmConnections][]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type ListEkmConnectionsRequest = src.ListEkmConnectionsRequest + +// Response message for [KeyManagementService.ListEkmConnections][]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type ListEkmConnectionsResponse = src.ListEkmConnectionsResponse + +// Request message for +// [KeyManagementService.ListImportJobs][google.cloud.kms.v1.KeyManagementService.ListImportJobs]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type ListImportJobsRequest = src.ListImportJobsRequest + +// Response message for +// [KeyManagementService.ListImportJobs][google.cloud.kms.v1.KeyManagementService.ListImportJobs]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type ListImportJobsResponse = src.ListImportJobsResponse + +// Request message for +// [KeyManagementService.ListKeyRings][google.cloud.kms.v1.KeyManagementService.ListKeyRings]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type ListKeyRingsRequest = src.ListKeyRingsRequest + +// Response message for +// [KeyManagementService.ListKeyRings][google.cloud.kms.v1.KeyManagementService.ListKeyRings]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type ListKeyRingsResponse = src.ListKeyRingsResponse + +// Cloud KMS metadata for the given +// [google.cloud.location.Location][google.cloud.location.Location]. 
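+// It is carried in the Location.Metadata Any field; a caller would typically
+// unpack it with (*anypb.Any).UnmarshalTo into a LocationMetadata value.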
+// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type LocationMetadata = src.LocationMetadata + +// Request message for +// [KeyManagementService.MacSign][google.cloud.kms.v1.KeyManagementService.MacSign]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type MacSignRequest = src.MacSignRequest + +// Response message for +// [KeyManagementService.MacSign][google.cloud.kms.v1.KeyManagementService.MacSign]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type MacSignResponse = src.MacSignResponse + +// Request message for +// [KeyManagementService.MacVerify][google.cloud.kms.v1.KeyManagementService.MacVerify]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type MacVerifyRequest = src.MacVerifyRequest + +// Response message for +// [KeyManagementService.MacVerify][google.cloud.kms.v1.KeyManagementService.MacVerify]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type MacVerifyResponse = src.MacVerifyResponse + +// [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] specifies how +// cryptographic operations are performed. For more information, see +// [Protection levels] +// (https://cloud.google.com/kms/docs/algorithms#protection_levels). +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type ProtectionLevel = src.ProtectionLevel + +// The public key for a given +// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. Obtained via +// [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type PublicKey = src.PublicKey + +// Request message for +// [KeyManagementService.RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type RestoreCryptoKeyVersionRequest = src.RestoreCryptoKeyVersionRequest + +// UnimplementedEkmServiceServer can be embedded to have forward compatible +// implementations. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type UnimplementedEkmServiceServer = src.UnimplementedEkmServiceServer + +// UnimplementedKeyManagementServiceServer can be embedded to have forward +// compatible implementations. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type UnimplementedKeyManagementServiceServer = src.UnimplementedKeyManagementServiceServer + +// Request message for +// [KeyManagementService.UpdateCryptoKeyPrimaryVersion][google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyPrimaryVersion]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type UpdateCryptoKeyPrimaryVersionRequest = src.UpdateCryptoKeyPrimaryVersionRequest + +// Request message for +// [KeyManagementService.UpdateCryptoKey][google.cloud.kms.v1.KeyManagementService.UpdateCryptoKey]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type UpdateCryptoKeyRequest = src.UpdateCryptoKeyRequest + +// Request message for +// [KeyManagementService.UpdateCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyVersion]. +// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type UpdateCryptoKeyVersionRequest = src.UpdateCryptoKeyVersionRequest + +// Request message for [KeyManagementService.UpdateEkmConnection][]. 
+// +// Deprecated: Please use types in: cloud.google.com/go/kms/apiv1/kmspb +type UpdateEkmConnectionRequest = src.UpdateEkmConnectionRequest + +// Deprecated: Please use funcs in: cloud.google.com/go/kms/apiv1/kmspb +func NewEkmServiceClient(cc grpc.ClientConnInterface) EkmServiceClient { + return src.NewEkmServiceClient(cc) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/kms/apiv1/kmspb +func NewKeyManagementServiceClient(cc grpc.ClientConnInterface) KeyManagementServiceClient { + return src.NewKeyManagementServiceClient(cc) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/kms/apiv1/kmspb +func RegisterEkmServiceServer(s *grpc.Server, srv EkmServiceServer) { + src.RegisterEkmServiceServer(s, srv) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/kms/apiv1/kmspb +func RegisterKeyManagementServiceServer(s *grpc.Server, srv KeyManagementServiceServer) { + src.RegisterKeyManagementServiceServer(s, srv) +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/location/locations.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/location/locations.pb.go new file mode 100644 index 00000000000..db70a101b87 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/location/locations.pb.go @@ -0,0 +1,631 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.2 +// source: google/cloud/location/locations.proto + +package location + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The request message for [Locations.ListLocations][google.cloud.location.Locations.ListLocations]. +type ListLocationsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource that owns the locations collection, if applicable. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The standard list filter. + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // The standard list page size. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // The standard list page token. 
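+	// Leave this empty to request the first page; to fetch subsequent pages,
+	// echo the next_page_token from the previous ListLocationsResponse.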
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListLocationsRequest) Reset() { + *x = ListLocationsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_location_locations_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListLocationsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListLocationsRequest) ProtoMessage() {} + +func (x *ListLocationsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_location_locations_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListLocationsRequest.ProtoReflect.Descriptor instead. +func (*ListLocationsRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_location_locations_proto_rawDescGZIP(), []int{0} +} + +func (x *ListLocationsRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListLocationsRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListLocationsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListLocationsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The response message for [Locations.ListLocations][google.cloud.location.Locations.ListLocations]. +type ListLocationsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of locations that matches the specified filter in the request. + Locations []*Location `protobuf:"bytes,1,rep,name=locations,proto3" json:"locations,omitempty"` + // The standard List next-page token. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListLocationsResponse) Reset() { + *x = ListLocationsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_location_locations_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListLocationsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListLocationsResponse) ProtoMessage() {} + +func (x *ListLocationsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_location_locations_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListLocationsResponse.ProtoReflect.Descriptor instead. +func (*ListLocationsResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_location_locations_proto_rawDescGZIP(), []int{1} +} + +func (x *ListLocationsResponse) GetLocations() []*Location { + if x != nil { + return x.Locations + } + return nil +} + +func (x *ListLocationsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The request message for [Locations.GetLocation][google.cloud.location.Locations.GetLocation]. 
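+// An illustrative call, where client is a LocationsClient and the resource
+// name is hypothetical:
+//
+//	loc, err := client.GetLocation(ctx, &GetLocationRequest{
+//		Name: "projects/my-project/locations/us-east1",
+//	})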
+type GetLocationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Resource name for the location. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetLocationRequest) Reset() { + *x = GetLocationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_location_locations_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetLocationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetLocationRequest) ProtoMessage() {} + +func (x *GetLocationRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_location_locations_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetLocationRequest.ProtoReflect.Descriptor instead. +func (*GetLocationRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_location_locations_proto_rawDescGZIP(), []int{2} +} + +func (x *GetLocationRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// A resource that represents Google Cloud Platform location. +type Location struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Resource name for the location, which may vary between implementations. + // For example: `"projects/example-project/locations/us-east1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The canonical id for this location. For example: `"us-east1"`. + LocationId string `protobuf:"bytes,4,opt,name=location_id,json=locationId,proto3" json:"location_id,omitempty"` + // The friendly name for this location, typically a nearby city name. + // For example, "Tokyo". + DisplayName string `protobuf:"bytes,5,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Cross-service attributes for the location. For example + // + // {"cloud.googleapis.com/region": "us-east1"} + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Service-specific metadata. For example the available capacity at the given + // location. + Metadata *anypb.Any `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *Location) Reset() { + *x = Location{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_location_locations_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Location) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Location) ProtoMessage() {} + +func (x *Location) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_location_locations_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Location.ProtoReflect.Descriptor instead. 
+func (*Location) Descriptor() ([]byte, []int) { + return file_google_cloud_location_locations_proto_rawDescGZIP(), []int{3} +} + +func (x *Location) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Location) GetLocationId() string { + if x != nil { + return x.LocationId + } + return "" +} + +func (x *Location) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *Location) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *Location) GetMetadata() *anypb.Any { + if x != nil { + return x.Metadata + } + return nil +} + +var File_google_cloud_location_locations_proto protoreflect.FileDescriptor + +var file_google_cloud_location_locations_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x1c, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x7e, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0x7e, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x09, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, + 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0x28, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x94, 0x02, 0x0a, 0x08, 0x4c, + 0x6f, 0x63, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, + 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x43, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x12, 0x30, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x32, 0xa4, 0x03, 0x0a, 0x09, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0xab, 0x01, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3f, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x39, 0x12, 0x14, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x7d, 0x5a, 0x21, 0x12, 0x1f, 0x2f, 0x76, + 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x9e, 0x01, + 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x43, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x3d, 0x12, 0x16, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 
0x2a, 0x7d, 0x5a, 0x23, 0x12, 0x21, 0x2f, 0x76, 0x31, + 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x1a, 0x48, + 0xca, 0x41, 0x14, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, + 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x6f, 0x0a, 0x19, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_google_cloud_location_locations_proto_rawDescOnce sync.Once + file_google_cloud_location_locations_proto_rawDescData = file_google_cloud_location_locations_proto_rawDesc +) + +func file_google_cloud_location_locations_proto_rawDescGZIP() []byte { + file_google_cloud_location_locations_proto_rawDescOnce.Do(func() { + file_google_cloud_location_locations_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_cloud_location_locations_proto_rawDescData) + }) + return file_google_cloud_location_locations_proto_rawDescData +} + +var file_google_cloud_location_locations_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_google_cloud_location_locations_proto_goTypes = []interface{}{ + (*ListLocationsRequest)(nil), // 0: google.cloud.location.ListLocationsRequest + (*ListLocationsResponse)(nil), // 1: google.cloud.location.ListLocationsResponse + (*GetLocationRequest)(nil), // 2: google.cloud.location.GetLocationRequest + (*Location)(nil), // 3: google.cloud.location.Location + nil, // 4: google.cloud.location.Location.LabelsEntry + (*anypb.Any)(nil), // 5: google.protobuf.Any +} +var file_google_cloud_location_locations_proto_depIdxs = []int32{ + 3, // 0: google.cloud.location.ListLocationsResponse.locations:type_name -> google.cloud.location.Location + 4, // 1: google.cloud.location.Location.labels:type_name -> google.cloud.location.Location.LabelsEntry + 5, // 2: google.cloud.location.Location.metadata:type_name -> google.protobuf.Any + 0, // 3: google.cloud.location.Locations.ListLocations:input_type -> google.cloud.location.ListLocationsRequest + 2, // 4: google.cloud.location.Locations.GetLocation:input_type -> google.cloud.location.GetLocationRequest + 1, // 5: google.cloud.location.Locations.ListLocations:output_type -> google.cloud.location.ListLocationsResponse + 3, // 6: google.cloud.location.Locations.GetLocation:output_type -> google.cloud.location.Location + 5, // [5:7] is the sub-list for method output_type + 3, // [3:5] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for 
extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_google_cloud_location_locations_proto_init() } +func file_google_cloud_location_locations_proto_init() { + if File_google_cloud_location_locations_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_cloud_location_locations_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListLocationsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_location_locations_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListLocationsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_location_locations_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetLocationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_location_locations_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_cloud_location_locations_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_cloud_location_locations_proto_goTypes, + DependencyIndexes: file_google_cloud_location_locations_proto_depIdxs, + MessageInfos: file_google_cloud_location_locations_proto_msgTypes, + }.Build() + File_google_cloud_location_locations_proto = out.File + file_google_cloud_location_locations_proto_rawDesc = nil + file_google_cloud_location_locations_proto_goTypes = nil + file_google_cloud_location_locations_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// LocationsClient is the client API for Locations service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type LocationsClient interface { + // Lists information about the supported locations for this service. + ListLocations(ctx context.Context, in *ListLocationsRequest, opts ...grpc.CallOption) (*ListLocationsResponse, error) + // Gets information about a location. + GetLocation(ctx context.Context, in *GetLocationRequest, opts ...grpc.CallOption) (*Location, error) +} + +type locationsClient struct { + cc grpc.ClientConnInterface +} + +func NewLocationsClient(cc grpc.ClientConnInterface) LocationsClient { + return &locationsClient{cc} +} + +func (c *locationsClient) ListLocations(ctx context.Context, in *ListLocationsRequest, opts ...grpc.CallOption) (*ListLocationsResponse, error) { + out := new(ListLocationsResponse) + err := c.cc.Invoke(ctx, "/google.cloud.location.Locations/ListLocations", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *locationsClient) GetLocation(ctx context.Context, in *GetLocationRequest, opts ...grpc.CallOption) (*Location, error) { + out := new(Location) + err := c.cc.Invoke(ctx, "/google.cloud.location.Locations/GetLocation", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// LocationsServer is the server API for Locations service. +type LocationsServer interface { + // Lists information about the supported locations for this service. + ListLocations(context.Context, *ListLocationsRequest) (*ListLocationsResponse, error) + // Gets information about a location. + GetLocation(context.Context, *GetLocationRequest) (*Location, error) +} + +// UnimplementedLocationsServer can be embedded to have forward compatible implementations. +type UnimplementedLocationsServer struct { +} + +func (*UnimplementedLocationsServer) ListLocations(context.Context, *ListLocationsRequest) (*ListLocationsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListLocations not implemented") +} +func (*UnimplementedLocationsServer) GetLocation(context.Context, *GetLocationRequest) (*Location, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetLocation not implemented") +} + +func RegisterLocationsServer(s *grpc.Server, srv LocationsServer) { + s.RegisterService(&_Locations_serviceDesc, srv) +} + +func _Locations_ListLocations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListLocationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LocationsServer).ListLocations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.location.Locations/ListLocations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LocationsServer).ListLocations(ctx, req.(*ListLocationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Locations_GetLocation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetLocationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LocationsServer).GetLocation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.location.Locations/GetLocation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LocationsServer).GetLocation(ctx, req.(*GetLocationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Locations_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.location.Locations", + HandlerType: (*LocationsServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListLocations", + Handler: _Locations_ListLocations_Handler, + }, + { + MethodName: "GetLocation", + Handler: _Locations_GetLocation_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/location/locations.proto", +} diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go new file mode 100644 index 00000000000..9fb745926a5 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go @@ -0,0 +1,208 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 
2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by aliasgen. DO NOT EDIT. + +// Package iam aliases all exported identifiers in package +// "cloud.google.com/go/iam/apiv1/iampb". +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb. +// Please read https://github.com/googleapis/google-cloud-go/blob/main/migration.md +// for more details. +package iam + +import ( + src "cloud.google.com/go/iam/apiv1/iampb" + grpc "google.golang.org/grpc" +) + +// Deprecated: Please use consts in: cloud.google.com/go/iam/apiv1/iampb +const ( + AuditConfigDelta_ACTION_UNSPECIFIED = src.AuditConfigDelta_ACTION_UNSPECIFIED + AuditConfigDelta_ADD = src.AuditConfigDelta_ADD + AuditConfigDelta_REMOVE = src.AuditConfigDelta_REMOVE + AuditLogConfig_ADMIN_READ = src.AuditLogConfig_ADMIN_READ + AuditLogConfig_DATA_READ = src.AuditLogConfig_DATA_READ + AuditLogConfig_DATA_WRITE = src.AuditLogConfig_DATA_WRITE + AuditLogConfig_LOG_TYPE_UNSPECIFIED = src.AuditLogConfig_LOG_TYPE_UNSPECIFIED + BindingDelta_ACTION_UNSPECIFIED = src.BindingDelta_ACTION_UNSPECIFIED + BindingDelta_ADD = src.BindingDelta_ADD + BindingDelta_REMOVE = src.BindingDelta_REMOVE +) + +// Deprecated: Please use vars in: cloud.google.com/go/iam/apiv1/iampb +var ( + AuditConfigDelta_Action_name = src.AuditConfigDelta_Action_name + AuditConfigDelta_Action_value = src.AuditConfigDelta_Action_value + AuditLogConfig_LogType_name = src.AuditLogConfig_LogType_name + AuditLogConfig_LogType_value = src.AuditLogConfig_LogType_value + BindingDelta_Action_name = src.BindingDelta_Action_name + BindingDelta_Action_value = src.BindingDelta_Action_value + File_google_iam_v1_iam_policy_proto = src.File_google_iam_v1_iam_policy_proto + File_google_iam_v1_options_proto = src.File_google_iam_v1_options_proto + File_google_iam_v1_policy_proto = src.File_google_iam_v1_policy_proto +) + +// Specifies the audit configuration for a service. The configuration +// determines which permission types are logged, and what identities, if any, +// are exempted from logging. An AuditConfig must have one or more +// AuditLogConfigs. If there are AuditConfigs for both `allServices` and a +// specific service, the union of the two AuditConfigs is used for that +// service: the log_types specified in each AuditConfig are enabled, and the +// exempted_members in each AuditLogConfig are exempted. Example Policy with +// multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", +// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ +// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type": +// "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", +// "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": +// "DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For +// sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ +// logging. It also exempts jose@example.com from DATA_READ logging, and +// aliya@example.com from DATA_WRITE logging. 
+// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditConfig = src.AuditConfig + +// One delta entry for AuditConfig. Each individual change (only one +// exempted_member in each entry) to a AuditConfig will be a separate entry. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditConfigDelta = src.AuditConfigDelta + +// The type of action performed on an audit configuration in a policy. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditConfigDelta_Action = src.AuditConfigDelta_Action + +// Provides the configuration for logging a type of permissions. Example: { +// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ +// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables +// 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from +// DATA_READ logging. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditLogConfig = src.AuditLogConfig + +// The list of valid permission types for which logging can be configured. +// Admin writes are always logged, and are not configurable. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditLogConfig_LogType = src.AuditLogConfig_LogType + +// Associates `members`, or principals, with a `role`. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type Binding = src.Binding + +// One delta entry for Binding. Each individual change (only one member in +// each entry) to a binding will be a separate entry. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type BindingDelta = src.BindingDelta + +// The type of action performed on a Binding in a policy. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type BindingDelta_Action = src.BindingDelta_Action + +// Request message for `GetIamPolicy` method. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type GetIamPolicyRequest = src.GetIamPolicyRequest + +// Encapsulates settings provided to GetIamPolicy. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type GetPolicyOptions = src.GetPolicyOptions + +// IAMPolicyClient is the client API for IAMPolicy service. For semantics +// around ctx use and closing/ending streaming RPCs, please refer to +// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type IAMPolicyClient = src.IAMPolicyClient + +// IAMPolicyServer is the server API for IAMPolicy service. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type IAMPolicyServer = src.IAMPolicyServer + +// An Identity and Access Management (IAM) policy, which specifies access +// controls for Google Cloud resources. A `Policy` is a collection of +// `bindings`. A `binding` binds one or more `members`, or principals, to a +// single `role`. Principals can be user accounts, service accounts, Google +// groups, and domains (such as G Suite). A `role` is a named list of +// permissions; each `role` can be an IAM predefined role or a user-created +// custom role. For some types of Google Cloud resources, a `binding` can also +// specify a `condition`, which is a logical expression that allows access to a +// resource only if the expression evaluates to `true`. 
A condition can add +// constraints based on attributes of the request, the resource, or both. To +// learn which resources support conditions in their IAM policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-policies). +// **JSON example:** { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": +// "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": +// "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - members: - +// user:mike@example.com - group:admins@example.com - domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - user:eve@example.com +// role: roles/resourcemanager.organizationViewer condition: title: expirable +// access description: Does not grant access after Sep 2020 expression: +// request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= +// version: 3 For a description of IAM and its features, see the [IAM +// documentation](https://cloud.google.com/iam/docs/). +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type Policy = src.Policy + +// The difference delta between two policies. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type PolicyDelta = src.PolicyDelta + +// Request message for `SetIamPolicy` method. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type SetIamPolicyRequest = src.SetIamPolicyRequest + +// Request message for `TestIamPermissions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type TestIamPermissionsRequest = src.TestIamPermissionsRequest + +// Response message for `TestIamPermissions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type TestIamPermissionsResponse = src.TestIamPermissionsResponse + +// UnimplementedIAMPolicyServer can be embedded to have forward compatible +// implementations. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type UnimplementedIAMPolicyServer = src.UnimplementedIAMPolicyServer + +// Deprecated: Please use funcs in: cloud.google.com/go/iam/apiv1/iampb +func NewIAMPolicyClient(cc grpc.ClientConnInterface) IAMPolicyClient { + return src.NewIAMPolicyClient(cc) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/iam/apiv1/iampb +func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) { + src.RegisterIAMPolicyServer(s, srv) +} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go new file mode 100644 index 00000000000..3a47b902c9a --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go @@ -0,0 +1,335 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.2 +// source: google/rpc/code.proto + +package code + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The canonical error codes for gRPC APIs. +// +// Sometimes multiple error codes may apply. Services should return +// the most specific error code that applies. For example, prefer +// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply. +// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`. +type Code int32 + +const ( + // Not an error; returned on success + // + // HTTP Mapping: 200 OK + Code_OK Code = 0 + // The operation was cancelled, typically by the caller. + // + // HTTP Mapping: 499 Client Closed Request + Code_CANCELLED Code = 1 + // Unknown error. For example, this error may be returned when + // a `Status` value received from another address space belongs to + // an error space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + // + // HTTP Mapping: 500 Internal Server Error + Code_UNKNOWN Code = 2 + // The client specified an invalid argument. Note that this differs + // from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + // + // HTTP Mapping: 400 Bad Request + Code_INVALID_ARGUMENT Code = 3 + // The deadline expired before the operation could complete. For operations + // that change the state of the system, this error may be returned + // even if the operation has completed successfully. For example, a + // successful response from a server could have been delayed long + // enough for the deadline to expire. + // + // HTTP Mapping: 504 Gateway Timeout + Code_DEADLINE_EXCEEDED Code = 4 + // Some requested entity (e.g., file or directory) was not found. + // + // Note to server developers: if a request is denied for an entire class + // of users, such as gradual feature rollout or undocumented whitelist, + // `NOT_FOUND` may be used. If a request is denied for some users within + // a class of users, such as user-based access control, `PERMISSION_DENIED` + // must be used. + // + // HTTP Mapping: 404 Not Found + Code_NOT_FOUND Code = 5 + // The entity that a client attempted to create (e.g., file or directory) + // already exists. + // + // HTTP Mapping: 409 Conflict + Code_ALREADY_EXISTS Code = 6 + // The caller does not have permission to execute the specified + // operation. 
`PERMISSION_DENIED` must not be used for rejections + // caused by exhausting some resource (use `RESOURCE_EXHAUSTED` + // instead for those errors). `PERMISSION_DENIED` must not be + // used if the caller can not be identified (use `UNAUTHENTICATED` + // instead for those errors). This error code does not imply the + // request is valid or the requested entity exists or satisfies + // other pre-conditions. + // + // HTTP Mapping: 403 Forbidden + Code_PERMISSION_DENIED Code = 7 + // The request does not have valid authentication credentials for the + // operation. + // + // HTTP Mapping: 401 Unauthorized + Code_UNAUTHENTICATED Code = 16 + // Some resource has been exhausted, perhaps a per-user quota, or + // perhaps the entire file system is out of space. + // + // HTTP Mapping: 429 Too Many Requests + Code_RESOURCE_EXHAUSTED Code = 8 + // The operation was rejected because the system is not in a state + // required for the operation's execution. For example, the directory + // to be deleted is non-empty, an rmdir operation is applied to + // a non-directory, etc. + // + // Service implementors can use the following guidelines to decide + // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: + // (a) Use `UNAVAILABLE` if the client can retry just the failing call. + // (b) Use `ABORTED` if the client should retry at a higher level + // (e.g., when a client-specified test-and-set fails, indicating the + // client should restart a read-modify-write sequence). + // (c) Use `FAILED_PRECONDITION` if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, `FAILED_PRECONDITION` + // should be returned since the client should not retry unless + // the files are deleted from the directory. + // + // HTTP Mapping: 400 Bad Request + Code_FAILED_PRECONDITION Code = 9 + // The operation was aborted, typically due to a concurrency issue such as + // a sequencer check failure or transaction abort. + // + // See the guidelines above for deciding between `FAILED_PRECONDITION`, + // `ABORTED`, and `UNAVAILABLE`. + // + // HTTP Mapping: 409 Conflict + Code_ABORTED Code = 10 + // The operation was attempted past the valid range. E.g., seeking or + // reading past end-of-file. + // + // Unlike `INVALID_ARGUMENT`, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate `INVALID_ARGUMENT` if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // `OUT_OF_RANGE` if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between `FAILED_PRECONDITION` and + // `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an `OUT_OF_RANGE` error to detect when + // they are done. + // + // HTTP Mapping: 400 Bad Request + Code_OUT_OF_RANGE Code = 11 + // The operation is not implemented or is not supported/enabled in this + // service. + // + // HTTP Mapping: 501 Not Implemented + Code_UNIMPLEMENTED Code = 12 + // Internal errors. This means that some invariants expected by the + // underlying system have been broken. This error code is reserved + // for serious errors. + // + // HTTP Mapping: 500 Internal Server Error + Code_INTERNAL Code = 13 + // The service is currently unavailable. 
This is most likely a + // transient condition, which can be corrected by retrying with + // a backoff. Note that it is not always safe to retry + // non-idempotent operations. + // + // See the guidelines above for deciding between `FAILED_PRECONDITION`, + // `ABORTED`, and `UNAVAILABLE`. + // + // HTTP Mapping: 503 Service Unavailable + Code_UNAVAILABLE Code = 14 + // Unrecoverable data loss or corruption. + // + // HTTP Mapping: 500 Internal Server Error + Code_DATA_LOSS Code = 15 +) + +// Enum value maps for Code. +var ( + Code_name = map[int32]string{ + 0: "OK", + 1: "CANCELLED", + 2: "UNKNOWN", + 3: "INVALID_ARGUMENT", + 4: "DEADLINE_EXCEEDED", + 5: "NOT_FOUND", + 6: "ALREADY_EXISTS", + 7: "PERMISSION_DENIED", + 16: "UNAUTHENTICATED", + 8: "RESOURCE_EXHAUSTED", + 9: "FAILED_PRECONDITION", + 10: "ABORTED", + 11: "OUT_OF_RANGE", + 12: "UNIMPLEMENTED", + 13: "INTERNAL", + 14: "UNAVAILABLE", + 15: "DATA_LOSS", + } + Code_value = map[string]int32{ + "OK": 0, + "CANCELLED": 1, + "UNKNOWN": 2, + "INVALID_ARGUMENT": 3, + "DEADLINE_EXCEEDED": 4, + "NOT_FOUND": 5, + "ALREADY_EXISTS": 6, + "PERMISSION_DENIED": 7, + "UNAUTHENTICATED": 16, + "RESOURCE_EXHAUSTED": 8, + "FAILED_PRECONDITION": 9, + "ABORTED": 10, + "OUT_OF_RANGE": 11, + "UNIMPLEMENTED": 12, + "INTERNAL": 13, + "UNAVAILABLE": 14, + "DATA_LOSS": 15, + } +) + +func (x Code) Enum() *Code { + p := new(Code) + *p = x + return p +} + +func (x Code) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Code) Descriptor() protoreflect.EnumDescriptor { + return file_google_rpc_code_proto_enumTypes[0].Descriptor() +} + +func (Code) Type() protoreflect.EnumType { + return &file_google_rpc_code_proto_enumTypes[0] +} + +func (x Code) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Code.Descriptor instead. 
+func (Code) EnumDescriptor() ([]byte, []int) { + return file_google_rpc_code_proto_rawDescGZIP(), []int{0} +} + +var File_google_rpc_code_proto protoreflect.FileDescriptor + +var file_google_rpc_code_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x72, 0x70, 0x63, 0x2a, 0xb7, 0x02, 0x0a, 0x04, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x06, 0x0a, 0x02, + 0x4f, 0x4b, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, + 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x02, + 0x12, 0x14, 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x41, 0x52, 0x47, 0x55, + 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x41, 0x44, 0x4c, 0x49, + 0x4e, 0x45, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0d, 0x0a, + 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, + 0x41, 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x06, + 0x12, 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, + 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x07, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x41, 0x55, 0x54, + 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x10, 0x12, 0x16, 0x0a, 0x12, + 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x58, 0x48, 0x41, 0x55, 0x53, 0x54, + 0x45, 0x44, 0x10, 0x08, 0x12, 0x17, 0x0a, 0x13, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x5f, 0x50, + 0x52, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x0b, 0x0a, + 0x07, 0x41, 0x42, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x55, + 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x11, 0x0a, 0x0d, + 0x55, 0x4e, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x0c, 0x12, + 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x0d, 0x12, 0x0f, 0x0a, + 0x0b, 0x55, 0x4e, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x0e, 0x12, 0x0d, + 0x0a, 0x09, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4c, 0x4f, 0x53, 0x53, 0x10, 0x0f, 0x42, 0x58, 0x0a, + 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, + 0x09, 0x43, 0x6f, 0x64, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x33, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x3b, 0x63, 0x6f, 0x64, + 0x65, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_rpc_code_proto_rawDescOnce sync.Once + file_google_rpc_code_proto_rawDescData = file_google_rpc_code_proto_rawDesc +) + +func file_google_rpc_code_proto_rawDescGZIP() []byte { + file_google_rpc_code_proto_rawDescOnce.Do(func() { + file_google_rpc_code_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_code_proto_rawDescData) + }) + return file_google_rpc_code_proto_rawDescData +} + +var file_google_rpc_code_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_rpc_code_proto_goTypes = []interface{}{ + (Code)(0), // 0: google.rpc.Code +} 
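// Illustrative sketch only (not part of the vendored file): the Code enum
// above documents a canonical HTTP mapping for each value. A caller could
// translate a google.rpc.Code into that HTTP status like this; httpStatus
// and the main function are hypothetical helpers for demonstration, while
// the Code constants and mappings come directly from the enum comments above.
package main

import (
	"fmt"

	code "google.golang.org/genproto/googleapis/rpc/code"
)

// httpStatus returns the canonical HTTP status documented for each Code.
func httpStatus(c code.Code) int {
	switch c {
	case code.Code_OK:
		return 200 // OK
	case code.Code_INVALID_ARGUMENT, code.Code_FAILED_PRECONDITION, code.Code_OUT_OF_RANGE:
		return 400 // Bad Request
	case code.Code_UNAUTHENTICATED:
		return 401 // Unauthorized
	case code.Code_PERMISSION_DENIED:
		return 403 // Forbidden
	case code.Code_NOT_FOUND:
		return 404 // Not Found
	case code.Code_ALREADY_EXISTS, code.Code_ABORTED:
		return 409 // Conflict
	case code.Code_RESOURCE_EXHAUSTED:
		return 429 // Too Many Requests
	case code.Code_CANCELLED:
		return 499 // Client Closed Request
	case code.Code_UNIMPLEMENTED:
		return 501 // Not Implemented
	case code.Code_UNAVAILABLE:
		return 503 // Service Unavailable
	case code.Code_DEADLINE_EXCEEDED:
		return 504 // Gateway Timeout
	default:
		// UNKNOWN, INTERNAL, and DATA_LOSS all map to 500 Internal Server Error.
		return 500
	}
}

func main() {
	// Prints: NOT_FOUND -> 404
	fmt.Println(code.Code_NOT_FOUND, "->", httpStatus(code.Code_NOT_FOUND))
}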
+var file_google_rpc_code_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_rpc_code_proto_init() } +func file_google_rpc_code_proto_init() { + if File_google_rpc_code_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_rpc_code_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_rpc_code_proto_goTypes, + DependencyIndexes: file_google_rpc_code_proto_depIdxs, + EnumInfos: file_google_rpc_code_proto_enumTypes, + }.Build() + File_google_rpc_code_proto = out.File + file_google_rpc_code_proto_rawDesc = nil + file_google_rpc_code_proto_goTypes = nil + file_google_rpc_code_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go new file mode 100644 index 00000000000..2f3ab924948 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -0,0 +1,1278 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.2 +// source: google/rpc/error_details.proto + +package errdetails + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Describes when the clients can retry a failed request. Clients could ignore +// the recommendation here or retry when this information is missing from error +// responses. +// +// It's always recommended that clients should use exponential backoff when +// retrying. +// +// Clients should wait until `retry_delay` amount of time has passed since +// receiving the error response before retrying. If retrying requests also +// fail, clients should use an exponential backoff scheme to gradually increase +// the delay between retries based on `retry_delay`, until either a maximum +// number of retries have been reached or a maximum retry delay cap has been +// reached. 
+type RetryInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Clients should wait at least this long between retrying the same request. + RetryDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=retry_delay,json=retryDelay,proto3" json:"retry_delay,omitempty"` +} + +func (x *RetryInfo) Reset() { + *x = RetryInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryInfo) ProtoMessage() {} + +func (x *RetryInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryInfo.ProtoReflect.Descriptor instead. +func (*RetryInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{0} +} + +func (x *RetryInfo) GetRetryDelay() *durationpb.Duration { + if x != nil { + return x.RetryDelay + } + return nil +} + +// Describes additional debugging info. +type DebugInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The stack trace entries indicating where the error occurred. + StackEntries []string `protobuf:"bytes,1,rep,name=stack_entries,json=stackEntries,proto3" json:"stack_entries,omitempty"` + // Additional debugging information provided by the server. + Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"` +} + +func (x *DebugInfo) Reset() { + *x = DebugInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DebugInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DebugInfo) ProtoMessage() {} + +func (x *DebugInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DebugInfo.ProtoReflect.Descriptor instead. +func (*DebugInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{1} +} + +func (x *DebugInfo) GetStackEntries() []string { + if x != nil { + return x.StackEntries + } + return nil +} + +func (x *DebugInfo) GetDetail() string { + if x != nil { + return x.Detail + } + return "" +} + +// Describes how a quota check failed. +// +// For example if a daily limit was exceeded for the calling project, +// a service could respond with a QuotaFailure detail containing the project +// id and the description of the quota limit that was exceeded. If the +// calling project hasn't enabled the service in the developer console, then +// a service could respond with the project id and set `service_disabled` +// to true. +// +// Also see RetryInfo and Help types for other details about handling a +// quota failure. 
+type QuotaFailure struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Describes all quota violations. + Violations []*QuotaFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"` +} + +func (x *QuotaFailure) Reset() { + *x = QuotaFailure{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QuotaFailure) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QuotaFailure) ProtoMessage() {} + +func (x *QuotaFailure) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QuotaFailure.ProtoReflect.Descriptor instead. +func (*QuotaFailure) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{2} +} + +func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation { + if x != nil { + return x.Violations + } + return nil +} + +// Describes the cause of the error with structured details. +// +// Example of an error when contacting the "pubsub.googleapis.com" API when it +// is not enabled: +// +// { "reason": "API_DISABLED" +// "domain": "googleapis.com" +// "metadata": { +// "resource": "projects/123", +// "service": "pubsub.googleapis.com" +// } +// } +// +// This response indicates that the pubsub.googleapis.com API is not enabled. +// +// Example of an error that is returned when attempting to create a Spanner +// instance in a region that is out of stock: +// +// { "reason": "STOCKOUT" +// "domain": "spanner.googleapis.com", +// "metadata": { +// "availableRegions": "us-central1,us-east2" +// } +// } +type ErrorInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The reason of the error. This is a constant value that identifies the + // proximate cause of the error. Error reasons are unique within a particular + // domain of errors. This should be at most 63 characters and match + // /[A-Z0-9_]+/. + Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` + // The logical grouping to which the "reason" belongs. The error domain + // is typically the registered service name of the tool or product that + // generates the error. Example: "pubsub.googleapis.com". If the error is + // generated by some common infrastructure, the error domain must be a + // globally unique value that identifies the infrastructure. For Google API + // infrastructure, the error domain is "googleapis.com". + Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` + // Additional structured details about this error. + // + // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in + // length. When identifying the current value of an exceeded limit, the units + // should be contained in the key, not the value. For example, rather than + // {"instanceLimit": "100/request"}, should be returned as, + // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of + // instances that can be created in a single (batch) request. 
+ Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ErrorInfo) Reset() { + *x = ErrorInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorInfo) ProtoMessage() {} + +func (x *ErrorInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorInfo.ProtoReflect.Descriptor instead. +func (*ErrorInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3} +} + +func (x *ErrorInfo) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *ErrorInfo) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +func (x *ErrorInfo) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +// Describes what preconditions have failed. +// +// For example, if an RPC failed because it required the Terms of Service to be +// acknowledged, it could list the terms of service violation in the +// PreconditionFailure message. +type PreconditionFailure struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Describes all precondition violations. + Violations []*PreconditionFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"` +} + +func (x *PreconditionFailure) Reset() { + *x = PreconditionFailure{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PreconditionFailure) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PreconditionFailure) ProtoMessage() {} + +func (x *PreconditionFailure) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PreconditionFailure.ProtoReflect.Descriptor instead. +func (*PreconditionFailure) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{4} +} + +func (x *PreconditionFailure) GetViolations() []*PreconditionFailure_Violation { + if x != nil { + return x.Violations + } + return nil +} + +// Describes violations in a client request. This error type focuses on the +// syntactic aspects of the request. +type BadRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Describes all violations in a client request. 
+ FieldViolations []*BadRequest_FieldViolation `protobuf:"bytes,1,rep,name=field_violations,json=fieldViolations,proto3" json:"field_violations,omitempty"` +} + +func (x *BadRequest) Reset() { + *x = BadRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BadRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BadRequest) ProtoMessage() {} + +func (x *BadRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BadRequest.ProtoReflect.Descriptor instead. +func (*BadRequest) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{5} +} + +func (x *BadRequest) GetFieldViolations() []*BadRequest_FieldViolation { + if x != nil { + return x.FieldViolations + } + return nil +} + +// Contains metadata about the request that clients can attach when filing a bug +// or providing other forms of feedback. +type RequestInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An opaque string that should only be interpreted by the service generating + // it. For example, it can be used to identify requests in the service's logs. + RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Any data that was used to serve this request. For example, an encrypted + // stack trace that can be sent back to the service provider for debugging. + ServingData string `protobuf:"bytes,2,opt,name=serving_data,json=servingData,proto3" json:"serving_data,omitempty"` +} + +func (x *RequestInfo) Reset() { + *x = RequestInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestInfo) ProtoMessage() {} + +func (x *RequestInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestInfo.ProtoReflect.Descriptor instead. +func (*RequestInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{6} +} + +func (x *RequestInfo) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *RequestInfo) GetServingData() string { + if x != nil { + return x.ServingData + } + return "" +} + +// Describes the resource that is being accessed. +type ResourceInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A name for the type of resource being accessed, e.g. "sql table", + // "cloud storage bucket", "file", "Google calendar"; or the type URL + // of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic". 
+ ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` + // The name of the resource being accessed. For example, a shared calendar + // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current + // error is [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. + ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` + // The owner of the resource (optional). + // For example, "user:" or "project:". + Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` + // Describes what error is encountered when accessing this resource. + // For example, updating a cloud project may require the `writer` permission + // on the developer console project. + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *ResourceInfo) Reset() { + *x = ResourceInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceInfo) ProtoMessage() {} + +func (x *ResourceInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceInfo.ProtoReflect.Descriptor instead. +func (*ResourceInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{7} +} + +func (x *ResourceInfo) GetResourceType() string { + if x != nil { + return x.ResourceType + } + return "" +} + +func (x *ResourceInfo) GetResourceName() string { + if x != nil { + return x.ResourceName + } + return "" +} + +func (x *ResourceInfo) GetOwner() string { + if x != nil { + return x.Owner + } + return "" +} + +func (x *ResourceInfo) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// Provides links to documentation or for performing an out of band action. +// +// For example, if a quota check failed with an error indicating the calling +// project hasn't enabled the accessed service, this can contain a URL pointing +// directly to the right place in the developer console to flip the bit. +type Help struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // URL(s) pointing to additional information on handling the current error. 
+ Links []*Help_Link `protobuf:"bytes,1,rep,name=links,proto3" json:"links,omitempty"` +} + +func (x *Help) Reset() { + *x = Help{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Help) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Help) ProtoMessage() {} + +func (x *Help) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Help.ProtoReflect.Descriptor instead. +func (*Help) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8} +} + +func (x *Help) GetLinks() []*Help_Link { + if x != nil { + return x.Links + } + return nil +} + +// Provides a localized error message that is safe to return to the user +// which can be attached to an RPC error. +type LocalizedMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The locale used following the specification defined at + // http://www.rfc-editor.org/rfc/bcp/bcp47.txt. + // Examples are: "en-US", "fr-CH", "es-MX" + Locale string `protobuf:"bytes,1,opt,name=locale,proto3" json:"locale,omitempty"` + // The localized error message in the above locale. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` +} + +func (x *LocalizedMessage) Reset() { + *x = LocalizedMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalizedMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalizedMessage) ProtoMessage() {} + +func (x *LocalizedMessage) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalizedMessage.ProtoReflect.Descriptor instead. +func (*LocalizedMessage) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{9} +} + +func (x *LocalizedMessage) GetLocale() string { + if x != nil { + return x.Locale + } + return "" +} + +func (x *LocalizedMessage) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +// A message type used to describe a single quota violation. For example, a +// daily quota or a custom quota that was exceeded. +type QuotaFailure_Violation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The subject on which the quota check failed. + // For example, "clientip:" or "project:". + Subject string `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"` + // A description of how the quota check failed. Clients can use this + // description to find more about the quota configuration in the service's + // public documentation, or find the relevant quota limit to adjust through + // developer console. 
+ // + // For example: "Service disabled" or "Daily Limit for read operations + // exceeded". + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *QuotaFailure_Violation) Reset() { + *x = QuotaFailure_Violation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QuotaFailure_Violation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QuotaFailure_Violation) ProtoMessage() {} + +func (x *QuotaFailure_Violation) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QuotaFailure_Violation.ProtoReflect.Descriptor instead. +func (*QuotaFailure_Violation) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *QuotaFailure_Violation) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +func (x *QuotaFailure_Violation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// A message type used to describe a single precondition failure. +type PreconditionFailure_Violation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of PreconditionFailure. We recommend using a service-specific + // enum type to define the supported precondition violation subjects. For + // example, "TOS" for "Terms of Service violation". + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The subject, relative to the type, that failed. + // For example, "google.com/cloud" relative to the "TOS" type would indicate + // which terms of service is being referenced. + Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` + // A description of how the precondition failed. Developers can use this + // description to understand how to fix the failure. + // + // For example: "Terms of service not accepted". + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *PreconditionFailure_Violation) Reset() { + *x = PreconditionFailure_Violation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PreconditionFailure_Violation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PreconditionFailure_Violation) ProtoMessage() {} + +func (x *PreconditionFailure_Violation) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PreconditionFailure_Violation.ProtoReflect.Descriptor instead. 
+func (*PreconditionFailure_Violation) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *PreconditionFailure_Violation) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *PreconditionFailure_Violation) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +func (x *PreconditionFailure_Violation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// A message type used to describe a single bad request field. +type BadRequest_FieldViolation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A path leading to a field in the request body. The value will be a + // sequence of dot-separated identifiers that identify a protocol buffer + // field. E.g., "field_violations.field" would identify this field. + Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` + // A description of why the request element is bad. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *BadRequest_FieldViolation) Reset() { + *x = BadRequest_FieldViolation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BadRequest_FieldViolation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BadRequest_FieldViolation) ProtoMessage() {} + +func (x *BadRequest_FieldViolation) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BadRequest_FieldViolation.ProtoReflect.Descriptor instead. +func (*BadRequest_FieldViolation) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{5, 0} +} + +func (x *BadRequest_FieldViolation) GetField() string { + if x != nil { + return x.Field + } + return "" +} + +func (x *BadRequest_FieldViolation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// Describes a URL link. +type Help_Link struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Describes what the link offers. + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // The URL of the link. + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` +} + +func (x *Help_Link) Reset() { + *x = Help_Link{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Help_Link) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Help_Link) ProtoMessage() {} + +func (x *Help_Link) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Help_Link.ProtoReflect.Descriptor instead. 
+func (*Help_Link) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8, 0} +} + +func (x *Help_Link) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Help_Link) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +var File_google_rpc_error_details_proto protoreflect.FileDescriptor + +var file_google_rpc_error_details_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x1e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x47, 0x0a, 0x09, + 0x52, 0x65, 0x74, 0x72, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3a, 0x0a, 0x0b, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, + 0x44, 0x65, 0x6c, 0x61, 0x79, 0x22, 0x48, 0x0a, 0x09, 0x44, 0x65, 0x62, 0x75, 0x67, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x65, 0x6e, 0x74, 0x72, + 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x63, 0x6b, + 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, + 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, + 0x9b, 0x01, 0x0a, 0x0c, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, + 0x12, 0x42, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, + 0x63, 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, + 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x47, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x01, + 0x0a, 0x09, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x72, + 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x3f, 0x0a, 0x08, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, + 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 
0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xbd, 0x01, 0x0a, 0x13, 0x50, 0x72, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, + 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, + 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x5b, 0x0a, 0x09, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa8, 0x01, 0x0a, 0x0a, 0x42, 0x61, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, + 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x48, 0x0a, 0x0e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, + 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 
0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, + 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, + 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, + 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, + 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, + 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, + 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, + 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_rpc_error_details_proto_rawDescOnce sync.Once + file_google_rpc_error_details_proto_rawDescData = file_google_rpc_error_details_proto_rawDesc +) + +func file_google_rpc_error_details_proto_rawDescGZIP() []byte { + file_google_rpc_error_details_proto_rawDescOnce.Do(func() { + file_google_rpc_error_details_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_error_details_proto_rawDescData) + }) + return file_google_rpc_error_details_proto_rawDescData +} + +var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_google_rpc_error_details_proto_goTypes = []interface{}{ + (*RetryInfo)(nil), // 0: google.rpc.RetryInfo + (*DebugInfo)(nil), // 1: google.rpc.DebugInfo + (*QuotaFailure)(nil), // 2: google.rpc.QuotaFailure + (*ErrorInfo)(nil), // 3: google.rpc.ErrorInfo + (*PreconditionFailure)(nil), // 4: google.rpc.PreconditionFailure + (*BadRequest)(nil), // 5: google.rpc.BadRequest + (*RequestInfo)(nil), // 6: google.rpc.RequestInfo + (*ResourceInfo)(nil), // 7: google.rpc.ResourceInfo + (*Help)(nil), // 8: google.rpc.Help + (*LocalizedMessage)(nil), // 9: google.rpc.LocalizedMessage + (*QuotaFailure_Violation)(nil), // 10: google.rpc.QuotaFailure.Violation + nil, // 11: 
google.rpc.ErrorInfo.MetadataEntry + (*PreconditionFailure_Violation)(nil), // 12: google.rpc.PreconditionFailure.Violation + (*BadRequest_FieldViolation)(nil), // 13: google.rpc.BadRequest.FieldViolation + (*Help_Link)(nil), // 14: google.rpc.Help.Link + (*durationpb.Duration)(nil), // 15: google.protobuf.Duration +} +var file_google_rpc_error_details_proto_depIdxs = []int32{ + 15, // 0: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration + 10, // 1: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation + 11, // 2: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry + 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation + 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation + 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_google_rpc_error_details_proto_init() } +func file_google_rpc_error_details_proto_init() { + if File_google_rpc_error_details_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_rpc_error_details_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DebugInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QuotaFailure); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PreconditionFailure); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BadRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_rpc_error_details_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Help); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalizedMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QuotaFailure_Violation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PreconditionFailure_Violation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BadRequest_FieldViolation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Help_Link); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_rpc_error_details_proto_rawDesc, + NumEnums: 0, + NumMessages: 15, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_rpc_error_details_proto_goTypes, + DependencyIndexes: file_google_rpc_error_details_proto_depIdxs, + MessageInfos: file_google_rpc_error_details_proto_msgTypes, + }.Build() + File_google_rpc_error_details_proto = out.File + file_google_rpc_error_details_proto_rawDesc = nil + file_google_rpc_error_details_proto_goTypes = nil + file_google_rpc_error_details_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go new file mode 100644 index 00000000000..38ef56f73ca --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go @@ -0,0 +1,232 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.2 +// source: google/type/expr.proto + +package expr + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Represents a textual expression in the Common Expression Language (CEL) +// syntax. CEL is a C-like expression language. The syntax and semantics of CEL +// are documented at https://github.com/google/cel-spec. +// +// Example (Comparison): +// +// title: "Summary size limit" +// description: "Determines if a summary is less than 100 chars" +// expression: "document.summary.size() < 100" +// +// Example (Equality): +// +// title: "Requestor is owner" +// description: "Determines if requestor is the document owner" +// expression: "document.owner == request.auth.claims.email" +// +// Example (Logic): +// +// title: "Public documents" +// description: "Determine whether the document should be publicly visible" +// expression: "document.type != 'private' && document.type != 'internal'" +// +// Example (Data Manipulation): +// +// title: "Notification string" +// description: "Create a notification string with a timestamp." +// expression: "'New message received at ' + string(document.create_time)" +// +// The exact variables and functions that may be referenced within an expression +// are determined by the service that evaluates it. See the service +// documentation for additional information. +type Expr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Textual representation of an expression in Common Expression Language + // syntax. + Expression string `protobuf:"bytes,1,opt,name=expression,proto3" json:"expression,omitempty"` + // Optional. Title for the expression, i.e. a short string describing + // its purpose. This can be used e.g. in UIs which allow to enter the + // expression. + Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"` + // Optional. Description of the expression. This is a longer text which + // describes the expression, e.g. when hovered over it in a UI. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Optional. String indicating the location of the expression for error + // reporting, e.g. a file name and a position in the file. + Location string `protobuf:"bytes,4,opt,name=location,proto3" json:"location,omitempty"` +} + +func (x *Expr) Reset() { + *x = Expr{} + if protoimpl.UnsafeEnabled { + mi := &file_google_type_expr_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr) ProtoMessage() {} + +func (x *Expr) ProtoReflect() protoreflect.Message { + mi := &file_google_type_expr_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr.ProtoReflect.Descriptor instead. 
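+
+// Illustrative sketch, not part of the generated file: the first documented
+// example expressed as a Go value. Which variables (here, "document") are in
+// scope is up to the service that evaluates the expression:
+//
+//	cond := &Expr{
+//		Title:       "Summary size limit",
+//		Description: "Determines if a summary is less than 100 chars",
+//		Expression:  "document.summary.size() < 100",
+//	}
+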
+func (*Expr) Descriptor() ([]byte, []int) { + return file_google_type_expr_proto_rawDescGZIP(), []int{0} +} + +func (x *Expr) GetExpression() string { + if x != nil { + return x.Expression + } + return "" +} + +func (x *Expr) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *Expr) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Expr) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +var File_google_type_expr_proto protoreflect.FileDescriptor + +var file_google_type_expr_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x22, 0x7a, 0x0a, 0x04, 0x45, 0x78, 0x70, 0x72, 0x12, 0x1e, 0x0a, + 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, + 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, + 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x5a, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x42, 0x09, 0x45, 0x78, 0x70, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x65, 0x78, + 0x70, 0x72, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xa2, 0x02, 0x03, 0x47, 0x54, 0x50, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_type_expr_proto_rawDescOnce sync.Once + file_google_type_expr_proto_rawDescData = file_google_type_expr_proto_rawDesc +) + +func file_google_type_expr_proto_rawDescGZIP() []byte { + file_google_type_expr_proto_rawDescOnce.Do(func() { + file_google_type_expr_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_type_expr_proto_rawDescData) + }) + return file_google_type_expr_proto_rawDescData +} + +var file_google_type_expr_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_type_expr_proto_goTypes = []interface{}{ + (*Expr)(nil), // 0: google.type.Expr +} +var file_google_type_expr_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_type_expr_proto_init() } +func file_google_type_expr_proto_init() { + if File_google_type_expr_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_type_expr_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_type_expr_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_type_expr_proto_goTypes, + DependencyIndexes: file_google_type_expr_proto_depIdxs, + MessageInfos: file_google_type_expr_proto_msgTypes, + }.Build() + File_google_type_expr_proto = out.File + file_google_type_expr_proto_rawDesc = nil + file_google_type_expr_proto_goTypes = nil + file_google_type_expr_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go new file mode 100644 index 00000000000..bf4c3cb4449 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -0,0 +1,962 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file defines the GRPCLB LoadBalancing protocol. +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/lb/v1/load_balancer.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.14.0 +// source: grpc/lb/v1/load_balancer.proto + +package grpc_lb_v1 + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. 
+const _ = proto.ProtoPackageIsVersion4 + +type LoadBalanceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to LoadBalanceRequestType: + // + // *LoadBalanceRequest_InitialRequest + // *LoadBalanceRequest_ClientStats + LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"` +} + +func (x *LoadBalanceRequest) Reset() { + *x = LoadBalanceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LoadBalanceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LoadBalanceRequest) ProtoMessage() {} + +func (x *LoadBalanceRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LoadBalanceRequest.ProtoReflect.Descriptor instead. +func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { + return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{0} +} + +func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType { + if m != nil { + return m.LoadBalanceRequestType + } + return nil +} + +func (x *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest { + if x, ok := x.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok { + return x.InitialRequest + } + return nil +} + +func (x *LoadBalanceRequest) GetClientStats() *ClientStats { + if x, ok := x.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok { + return x.ClientStats + } + return nil +} + +type isLoadBalanceRequest_LoadBalanceRequestType interface { + isLoadBalanceRequest_LoadBalanceRequestType() +} + +type LoadBalanceRequest_InitialRequest struct { + // This message should be sent on the first request to the load balancer. + InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,proto3,oneof"` +} + +type LoadBalanceRequest_ClientStats struct { + // The client stats should be periodically reported to the load balancer + // based on the duration defined in the InitialLoadBalanceResponse. + ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,proto3,oneof"` +} + +func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {} + +func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType() {} + +type InitialLoadBalanceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the load balanced service (e.g., service.googleapis.com). Its + // length should be less than 256 bytes. + // The name might include a port number. How to handle the port number is up + // to the balancer. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *InitialLoadBalanceRequest) Reset() { + *x = InitialLoadBalanceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InitialLoadBalanceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InitialLoadBalanceRequest) ProtoMessage() {} + +func (x *InitialLoadBalanceRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InitialLoadBalanceRequest.ProtoReflect.Descriptor instead. +func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { + return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{1} +} + +func (x *InitialLoadBalanceRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Contains the number of calls finished for a particular load balance token. +type ClientStatsPerToken struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // See Server.load_balance_token. + LoadBalanceToken string `protobuf:"bytes,1,opt,name=load_balance_token,json=loadBalanceToken,proto3" json:"load_balance_token,omitempty"` + // The total number of RPCs that finished associated with the token. + NumCalls int64 `protobuf:"varint,2,opt,name=num_calls,json=numCalls,proto3" json:"num_calls,omitempty"` +} + +func (x *ClientStatsPerToken) Reset() { + *x = ClientStatsPerToken{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientStatsPerToken) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientStatsPerToken) ProtoMessage() {} + +func (x *ClientStatsPerToken) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientStatsPerToken.ProtoReflect.Descriptor instead. +func (*ClientStatsPerToken) Descriptor() ([]byte, []int) { + return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{2} +} + +func (x *ClientStatsPerToken) GetLoadBalanceToken() string { + if x != nil { + return x.LoadBalanceToken + } + return "" +} + +func (x *ClientStatsPerToken) GetNumCalls() int64 { + if x != nil { + return x.NumCalls + } + return 0 +} + +// Contains client level statistics that are useful to load balancing. Each +// count except the timestamp should be reset to zero after reporting the stats. +type ClientStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The timestamp of generating the report. + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // The total number of RPCs that started. 
+ NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted,proto3" json:"num_calls_started,omitempty"` + // The total number of RPCs that finished. + NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished,proto3" json:"num_calls_finished,omitempty"` + // The total number of RPCs that failed to reach a server except dropped RPCs. + NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend,proto3" json:"num_calls_finished_with_client_failed_to_send,omitempty"` + // The total number of RPCs that finished and are known to have been received + // by a server. + NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived,proto3" json:"num_calls_finished_known_received,omitempty"` + // The list of dropped calls. + CallsFinishedWithDrop []*ClientStatsPerToken `protobuf:"bytes,8,rep,name=calls_finished_with_drop,json=callsFinishedWithDrop,proto3" json:"calls_finished_with_drop,omitempty"` +} + +func (x *ClientStats) Reset() { + *x = ClientStats{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientStats) ProtoMessage() {} + +func (x *ClientStats) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientStats.ProtoReflect.Descriptor instead. 
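+
+// Illustrative sketch, not part of the generated file: a report shaped per
+// the field comments above, assuming the
+// "google.golang.org/protobuf/types/known/timestamppb" package and made-up
+// counts. Every counter except the timestamp is reset to zero once the
+// report has been sent:
+//
+//	stats := &ClientStats{
+//		Timestamp:        timestamppb.Now(),
+//		NumCallsStarted:  120,
+//		NumCallsFinished: 118,
+//		CallsFinishedWithDrop: []*ClientStatsPerToken{
+//			{LoadBalanceToken: "token-a", NumCalls: 2},
+//		},
+//	}
+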
+func (*ClientStats) Descriptor() ([]byte, []int) { + return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{3} +} + +func (x *ClientStats) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp + } + return nil +} + +func (x *ClientStats) GetNumCallsStarted() int64 { + if x != nil { + return x.NumCallsStarted + } + return 0 +} + +func (x *ClientStats) GetNumCallsFinished() int64 { + if x != nil { + return x.NumCallsFinished + } + return 0 +} + +func (x *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 { + if x != nil { + return x.NumCallsFinishedWithClientFailedToSend + } + return 0 +} + +func (x *ClientStats) GetNumCallsFinishedKnownReceived() int64 { + if x != nil { + return x.NumCallsFinishedKnownReceived + } + return 0 +} + +func (x *ClientStats) GetCallsFinishedWithDrop() []*ClientStatsPerToken { + if x != nil { + return x.CallsFinishedWithDrop + } + return nil +} + +type LoadBalanceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to LoadBalanceResponseType: + // + // *LoadBalanceResponse_InitialResponse + // *LoadBalanceResponse_ServerList + // *LoadBalanceResponse_FallbackResponse + LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"` +} + +func (x *LoadBalanceResponse) Reset() { + *x = LoadBalanceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LoadBalanceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LoadBalanceResponse) ProtoMessage() {} + +func (x *LoadBalanceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LoadBalanceResponse.ProtoReflect.Descriptor instead. +func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { + return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{4} +} + +func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType { + if m != nil { + return m.LoadBalanceResponseType + } + return nil +} + +func (x *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse { + if x, ok := x.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok { + return x.InitialResponse + } + return nil +} + +func (x *LoadBalanceResponse) GetServerList() *ServerList { + if x, ok := x.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok { + return x.ServerList + } + return nil +} + +func (x *LoadBalanceResponse) GetFallbackResponse() *FallbackResponse { + if x, ok := x.GetLoadBalanceResponseType().(*LoadBalanceResponse_FallbackResponse); ok { + return x.FallbackResponse + } + return nil +} + +type isLoadBalanceResponse_LoadBalanceResponseType interface { + isLoadBalanceResponse_LoadBalanceResponseType() +} + +type LoadBalanceResponse_InitialResponse struct { + // This message should be sent on the first response to the client. 
+ InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,proto3,oneof"` +} + +type LoadBalanceResponse_ServerList struct { + // Contains the list of servers selected by the load balancer. The client + // should send requests to these servers in the specified order. + ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,proto3,oneof"` +} + +type LoadBalanceResponse_FallbackResponse struct { + // If this field is set, then the client should eagerly enter fallback + // mode (even if there are existing, healthy connections to backends). + FallbackResponse *FallbackResponse `protobuf:"bytes,3,opt,name=fallback_response,json=fallbackResponse,proto3,oneof"` +} + +func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {} + +func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType() {} + +func (*LoadBalanceResponse_FallbackResponse) isLoadBalanceResponse_LoadBalanceResponseType() {} + +type FallbackResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *FallbackResponse) Reset() { + *x = FallbackResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FallbackResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FallbackResponse) ProtoMessage() {} + +func (x *FallbackResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FallbackResponse.ProtoReflect.Descriptor instead. +func (*FallbackResponse) Descriptor() ([]byte, []int) { + return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{5} +} + +type InitialLoadBalanceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This interval defines how often the client should send the client stats + // to the load balancer. Stats should only be reported when the duration is + // positive. + ClientStatsReportInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval,proto3" json:"client_stats_report_interval,omitempty"` +} + +func (x *InitialLoadBalanceResponse) Reset() { + *x = InitialLoadBalanceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InitialLoadBalanceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InitialLoadBalanceResponse) ProtoMessage() {} + +func (x *InitialLoadBalanceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InitialLoadBalanceResponse.ProtoReflect.Descriptor instead. 
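+
+// Illustrative sketch, not part of the generated file: honoring the report
+// interval on the client, assuming the standard "time" package and that
+// "resp" is a received *InitialLoadBalanceResponse. Stats are reported only
+// while the duration is positive:
+//
+//	if d := resp.GetClientStatsReportInterval(); d.IsValid() && d.AsDuration() > 0 {
+//		ticker := time.NewTicker(d.AsDuration())
+//		defer ticker.Stop()
+//		// send a ClientStats message on every tick
+//	}
+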
+func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { + return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{6} +} + +func (x *InitialLoadBalanceResponse) GetClientStatsReportInterval() *durationpb.Duration { + if x != nil { + return x.ClientStatsReportInterval + } + return nil +} + +type ServerList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Contains a list of servers selected by the load balancer. The list will + // be updated when server resolutions change or as needed to balance load + // across more servers. The client should consume the server list in order + // unless instructed otherwise via the client_config. + Servers []*Server `protobuf:"bytes,1,rep,name=servers,proto3" json:"servers,omitempty"` +} + +func (x *ServerList) Reset() { + *x = ServerList{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerList) ProtoMessage() {} + +func (x *ServerList) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerList.ProtoReflect.Descriptor instead. +func (*ServerList) Descriptor() ([]byte, []int) { + return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{7} +} + +func (x *ServerList) GetServers() []*Server { + if x != nil { + return x.Servers + } + return nil +} + +// Contains server information. When the drop field is not true, use the other +// fields. +type Server struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A resolved address for the server, serialized in network-byte-order. It may + // either be an IPv4 or IPv6 address. + IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + // A resolved port number for the server. + Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` + // An opaque but printable token for load reporting. The client must include + // the token of the picked server into the initial metadata when it starts a + // call to that server. The token is used by the server to verify the request + // and to allow the server to report load to the gRPC LB system. The token is + // also used in client stats for reporting dropped calls. + // + // Its length can be variable but must be less than 50 bytes. + LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken,proto3" json:"load_balance_token,omitempty"` + // Indicates whether this particular request should be dropped by the client. + // If the request is dropped, there will be a corresponding entry in + // ClientStats.calls_finished_with_drop. 
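+	//
+	// Illustrative sketch, not part of the generated file: attaching the
+	// picked server's token to a call's initial metadata, assuming a picked
+	// *Server value "srv", the "google.golang.org/grpc/metadata" package,
+	// and the "lb-token" metadata key used by gRPC's grpclb implementation:
+	//
+	//	ctx = metadata.AppendToOutgoingContext(ctx, "lb-token", srv.GetLoadBalanceToken())
+	//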
+ Drop bool `protobuf:"varint,4,opt,name=drop,proto3" json:"drop,omitempty"` +} + +func (x *Server) Reset() { + *x = Server{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Server) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Server) ProtoMessage() {} + +func (x *Server) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Server.ProtoReflect.Descriptor instead. +func (*Server) Descriptor() ([]byte, []int) { + return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{8} +} + +func (x *Server) GetIpAddress() []byte { + if x != nil { + return x.IpAddress + } + return nil +} + +func (x *Server) GetPort() int32 { + if x != nil { + return x.Port + } + return 0 +} + +func (x *Server) GetLoadBalanceToken() string { + if x != nil { + return x.LoadBalanceToken + } + return "" +} + +func (x *Server) GetDrop() bool { + if x != nil { + return x.Drop + } + return false +} + +var File_grpc_lb_v1_load_balancer_proto protoreflect.FileDescriptor + +var file_grpc_lb_v1_load_balancer_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x62, 0x2f, 0x76, 0x31, 0x2f, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0a, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc1, 0x01, + 0x0a, 0x12, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, + 0x61, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0e, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x53, 0x74, 0x61, 0x74, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x42, 0x1b, 0x0a, 0x19, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x22, 0x2f, 0x0a, 0x19, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4c, 0x6f, 0x61, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x22, 0x60, 0x0a, 0x13, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x50, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x75, 0x6d, 0x5f, 0x63, + 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6e, 0x75, 0x6d, 0x43, + 0x61, 0x6c, 0x6c, 0x73, 0x22, 0xb0, 0x03, 0x0a, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2a, + 0x0a, 0x11, 0x6e, 0x75, 0x6d, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x6e, 0x75, 0x6d, 0x43, 0x61, + 0x6c, 0x6c, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x75, + 0x6d, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x43, 0x61, 0x6c, 0x6c, 0x73, + 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x12, 0x5d, 0x0a, 0x2d, 0x6e, 0x75, 0x6d, 0x5f, + 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x77, + 0x69, 0x74, 0x68, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, + 0x64, 0x5f, 0x74, 0x6f, 0x5f, 0x73, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x26, 0x6e, 0x75, 0x6d, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, + 0x64, 0x57, 0x69, 0x74, 0x68, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, + 0x64, 0x54, 0x6f, 0x53, 0x65, 0x6e, 0x64, 0x12, 0x48, 0x0a, 0x21, 0x6e, 0x75, 0x6d, 0x5f, 0x63, + 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x1d, 0x6e, 0x75, 0x6d, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x46, 0x69, 0x6e, 0x69, + 0x73, 0x68, 0x65, 0x64, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, + 0x64, 0x12, 0x58, 0x0a, 0x18, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, + 0x68, 0x65, 0x64, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x18, 0x08, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x15, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x46, 0x69, 0x6e, 0x69, 0x73, + 0x68, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x44, 0x72, 0x6f, 0x70, 0x4a, 0x04, 0x08, 0x04, 0x10, + 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x90, 0x02, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x53, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 
0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4c, 0x6f, + 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x48, 0x00, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6c, + 0x69, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x69, 0x73, + 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, + 0x4b, 0x0a, 0x11, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x10, 0x66, 0x61, 0x6c, 0x6c, + 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x1c, 0x0a, 0x1a, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x12, 0x0a, 0x10, 0x46, 0x61, + 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7e, + 0x0a, 0x1a, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x1c, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x72, 0x65, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x19, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x40, + 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x07, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, + 0x22, 0x83, 0x01, 0x0a, 0x06, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x69, + 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x09, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x2c, + 0x0a, 0x12, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x6f, 0x61, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x12, 0x0a, 0x04, + 0x64, 0x72, 0x6f, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x72, 0x6f, 
0x70, + 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x32, 0x62, 0x0a, 0x0c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0b, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x65, 0x4c, 0x6f, 0x61, 0x64, 0x12, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x57, 0x0a, 0x0d, 0x69, 0x6f, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x4c, 0x6f, 0x61, + 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x72, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x62, + 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_lb_v1_load_balancer_proto_rawDescOnce sync.Once + file_grpc_lb_v1_load_balancer_proto_rawDescData = file_grpc_lb_v1_load_balancer_proto_rawDesc +) + +func file_grpc_lb_v1_load_balancer_proto_rawDescGZIP() []byte { + file_grpc_lb_v1_load_balancer_proto_rawDescOnce.Do(func() { + file_grpc_lb_v1_load_balancer_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_lb_v1_load_balancer_proto_rawDescData) + }) + return file_grpc_lb_v1_load_balancer_proto_rawDescData +} + +var file_grpc_lb_v1_load_balancer_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_grpc_lb_v1_load_balancer_proto_goTypes = []interface{}{ + (*LoadBalanceRequest)(nil), // 0: grpc.lb.v1.LoadBalanceRequest + (*InitialLoadBalanceRequest)(nil), // 1: grpc.lb.v1.InitialLoadBalanceRequest + (*ClientStatsPerToken)(nil), // 2: grpc.lb.v1.ClientStatsPerToken + (*ClientStats)(nil), // 3: grpc.lb.v1.ClientStats + (*LoadBalanceResponse)(nil), // 4: grpc.lb.v1.LoadBalanceResponse + (*FallbackResponse)(nil), // 5: grpc.lb.v1.FallbackResponse + (*InitialLoadBalanceResponse)(nil), // 6: grpc.lb.v1.InitialLoadBalanceResponse + (*ServerList)(nil), // 7: grpc.lb.v1.ServerList + (*Server)(nil), // 8: grpc.lb.v1.Server + (*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 10: google.protobuf.Duration +} +var file_grpc_lb_v1_load_balancer_proto_depIdxs = []int32{ + 1, // 0: grpc.lb.v1.LoadBalanceRequest.initial_request:type_name -> grpc.lb.v1.InitialLoadBalanceRequest + 3, // 1: grpc.lb.v1.LoadBalanceRequest.client_stats:type_name -> grpc.lb.v1.ClientStats + 9, // 2: grpc.lb.v1.ClientStats.timestamp:type_name -> google.protobuf.Timestamp + 2, // 3: grpc.lb.v1.ClientStats.calls_finished_with_drop:type_name -> grpc.lb.v1.ClientStatsPerToken + 6, // 4: grpc.lb.v1.LoadBalanceResponse.initial_response:type_name -> grpc.lb.v1.InitialLoadBalanceResponse + 7, // 5: grpc.lb.v1.LoadBalanceResponse.server_list:type_name -> grpc.lb.v1.ServerList + 5, // 6: grpc.lb.v1.LoadBalanceResponse.fallback_response:type_name -> grpc.lb.v1.FallbackResponse + 10, // 7: grpc.lb.v1.InitialLoadBalanceResponse.client_stats_report_interval:type_name -> google.protobuf.Duration + 8, // 8: grpc.lb.v1.ServerList.servers:type_name -> grpc.lb.v1.Server + 0, // 9: 
grpc.lb.v1.LoadBalancer.BalanceLoad:input_type -> grpc.lb.v1.LoadBalanceRequest + 4, // 10: grpc.lb.v1.LoadBalancer.BalanceLoad:output_type -> grpc.lb.v1.LoadBalanceResponse + 10, // [10:11] is the sub-list for method output_type + 9, // [9:10] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_grpc_lb_v1_load_balancer_proto_init() } +func file_grpc_lb_v1_load_balancer_proto_init() { + if File_grpc_lb_v1_load_balancer_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_lb_v1_load_balancer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoadBalanceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitialLoadBalanceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientStatsPerToken); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoadBalanceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FallbackResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitialLoadBalanceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Server); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*LoadBalanceRequest_InitialRequest)(nil), + (*LoadBalanceRequest_ClientStats)(nil), + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[4].OneofWrappers = []interface{}{ + (*LoadBalanceResponse_InitialResponse)(nil), + (*LoadBalanceResponse_ServerList)(nil), + (*LoadBalanceResponse_FallbackResponse)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_lb_v1_load_balancer_proto_rawDesc, + NumEnums: 0, + NumMessages: 9, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_lb_v1_load_balancer_proto_goTypes, + DependencyIndexes: file_grpc_lb_v1_load_balancer_proto_depIdxs, + MessageInfos: file_grpc_lb_v1_load_balancer_proto_msgTypes, + }.Build() + File_grpc_lb_v1_load_balancer_proto = out.File + file_grpc_lb_v1_load_balancer_proto_rawDesc = nil + file_grpc_lb_v1_load_balancer_proto_goTypes = nil + file_grpc_lb_v1_load_balancer_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go new file mode 100644 index 00000000000..cf1034830d5 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -0,0 +1,156 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file defines the GRPCLB LoadBalancing protocol. +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/lb/v1/load_balancer.proto + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.14.0 +// source: grpc/lb/v1/load_balancer.proto + +package grpc_lb_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// LoadBalancerClient is the client API for LoadBalancer service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type LoadBalancerClient interface { + // Bidirectional rpc to get a list of servers. + BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) +} + +type loadBalancerClient struct { + cc grpc.ClientConnInterface +} + +func NewLoadBalancerClient(cc grpc.ClientConnInterface) LoadBalancerClient { + return &loadBalancerClient{cc} +} + +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { + stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) 
+ if err != nil { + return nil, err + } + x := &loadBalancerBalanceLoadClient{stream} + return x, nil +} + +type LoadBalancer_BalanceLoadClient interface { + Send(*LoadBalanceRequest) error + Recv() (*LoadBalanceResponse, error) + grpc.ClientStream +} + +type loadBalancerBalanceLoadClient struct { + grpc.ClientStream +} + +func (x *loadBalancerBalanceLoadClient) Send(m *LoadBalanceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *loadBalancerBalanceLoadClient) Recv() (*LoadBalanceResponse, error) { + m := new(LoadBalanceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// LoadBalancerServer is the server API for LoadBalancer service. +// All implementations should embed UnimplementedLoadBalancerServer +// for forward compatibility +type LoadBalancerServer interface { + // Bidirectional rpc to get a list of servers. + BalanceLoad(LoadBalancer_BalanceLoadServer) error +} + +// UnimplementedLoadBalancerServer should be embedded to have forward compatible implementations. +type UnimplementedLoadBalancerServer struct { +} + +func (UnimplementedLoadBalancerServer) BalanceLoad(LoadBalancer_BalanceLoadServer) error { + return status.Errorf(codes.Unimplemented, "method BalanceLoad not implemented") +} + +// UnsafeLoadBalancerServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to LoadBalancerServer will +// result in compilation errors. +type UnsafeLoadBalancerServer interface { + mustEmbedUnimplementedLoadBalancerServer() +} + +func RegisterLoadBalancerServer(s grpc.ServiceRegistrar, srv LoadBalancerServer) { + s.RegisterService(&LoadBalancer_ServiceDesc, srv) +} + +func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{stream}) +} + +type LoadBalancer_BalanceLoadServer interface { + Send(*LoadBalanceResponse) error + Recv() (*LoadBalanceRequest, error) + grpc.ServerStream +} + +type loadBalancerBalanceLoadServer struct { + grpc.ServerStream +} + +func (x *loadBalancerBalanceLoadServer) Send(m *LoadBalanceResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *loadBalancerBalanceLoadServer) Recv() (*LoadBalanceRequest, error) { + m := new(LoadBalanceRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// LoadBalancer_ServiceDesc is the grpc.ServiceDesc for LoadBalancer service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var LoadBalancer_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.lb.v1.LoadBalancer", + HandlerType: (*LoadBalancerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "BalanceLoad", + Handler: _LoadBalancer_BalanceLoad_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/lb/v1/load_balancer.proto", +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go new file mode 100644 index 00000000000..dd15810d0ae --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -0,0 +1,520 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclb defines a grpclb balancer. +// +// To install grpclb balancer, import this package as: +// +// import _ "google.golang.org/grpc/balancer/grpclb" +package grpclb + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + grpclbstate "google.golang.org/grpc/balancer/grpclb/state" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/resolver/dns" + "google.golang.org/grpc/resolver" + + durationpb "github.com/golang/protobuf/ptypes/duration" + lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" +) + +const ( + lbTokenKey = "lb-token" + defaultFallbackTimeout = 10 * time.Second + grpclbName = "grpclb" +) + +var errServerTerminatedConnection = errors.New("grpclb: failed to recv server list: server terminated connection") +var logger = grpclog.Component("grpclb") + +func convertDuration(d *durationpb.Duration) time.Duration { + if d == nil { + return 0 + } + return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond +} + +// Client API for LoadBalancer service. +// Mostly copied from generated pb.go file. +// To avoid circular dependency. +type loadBalancerClient struct { + cc *grpc.ClientConn +} + +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (*balanceLoadClientStream, error) { + desc := &grpc.StreamDesc{ + StreamName: "BalanceLoad", + ServerStreams: true, + ClientStreams: true, + } + stream, err := c.cc.NewStream(ctx, desc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) + if err != nil { + return nil, err + } + x := &balanceLoadClientStream{stream} + return x, nil +} + +type balanceLoadClientStream struct { + grpc.ClientStream +} + +func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) { + m := new(lbpb.LoadBalanceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func init() { + balancer.Register(newLBBuilder()) + dns.EnableSRVLookups = true +} + +// newLBBuilder creates a builder for grpclb. +func newLBBuilder() balancer.Builder { + return newLBBuilderWithFallbackTimeout(defaultFallbackTimeout) +} + +// newLBBuilderWithFallbackTimeout creates a grpclb builder with the given +// fallbackTimeout. If no response is received from the remote balancer within +// fallbackTimeout, the backend addresses from the resolved address list will be +// used. +// +// Only call this function when a non-default fallback timeout is needed. 
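+//
+// As an illustrative sketch only (this helper is unexported, and the default
+// builder is already registered in init()), code inside this package could
+// install a builder with a shorter timeout like so:
+//
+//	balancer.Register(newLBBuilderWithFallbackTimeout(5 * time.Second))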
+func newLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Builder {
+	return &lbBuilder{
+		fallbackTimeout: fallbackTimeout,
+	}
+}
+
+type lbBuilder struct {
+	fallbackTimeout time.Duration
+}
+
+func (b *lbBuilder) Name() string {
+	return grpclbName
+}
+
+func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+	// This generates a manual resolver builder with a fixed scheme. This
+	// scheme will be used to dial to remote LB, so we can send filtered
+	// address updates to remote LB ClientConn using this manual resolver.
+	r := &lbManualResolver{scheme: "grpclb-internal", ccb: cc}
+
+	lb := &lbBalancer{
+		cc:              newLBCacheClientConn(cc),
+		dialTarget:      opt.Target.Endpoint,
+		target:          opt.Target.Endpoint,
+		opt:             opt,
+		fallbackTimeout: b.fallbackTimeout,
+		doneCh:          make(chan struct{}),
+
+		manualResolver: r,
+		subConns:       make(map[resolver.Address]balancer.SubConn),
+		scStates:       make(map[balancer.SubConn]connectivity.State),
+		picker:         &errPicker{err: balancer.ErrNoSubConnAvailable},
+		clientStats:    newRPCStats(),
+		backoff:        backoff.DefaultExponential, // TODO: make backoff configurable.
+	}
+
+	var err error
+	if opt.CredsBundle != nil {
+		lb.grpclbClientConnCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBalancer)
+		if err != nil {
+			logger.Warningf("lbBalancer: client connection creds NewWithMode failed: %v", err)
+		}
+		lb.grpclbBackendCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBackendFromBalancer)
+		if err != nil {
+			logger.Warningf("lbBalancer: backend creds NewWithMode failed: %v", err)
+		}
+	}
+
+	return lb
+}
+
+type lbBalancer struct {
+	cc         *lbCacheClientConn
+	dialTarget string // user's dial target
+	target     string // same as dialTarget unless overridden in service config
+	opt        balancer.BuildOptions
+
+	usePickFirst bool
+
+	// grpclbClientConnCreds is the creds bundle to be used to connect to grpclb
+	// servers. If it's nil, use the TransportCredentials from BuildOptions
+	// instead.
+	grpclbClientConnCreds credentials.Bundle
+	// grpclbBackendCreds is the creds bundle to be used for addresses that are
+	// returned by grpclb server. If it's nil, don't set anything when creating
+	// SubConns.
+	grpclbBackendCreds credentials.Bundle
+
+	fallbackTimeout time.Duration
+	doneCh          chan struct{}
+
+	// manualResolver is used in the remote LB ClientConn inside grpclb. When
+	// resolved address updates are received by grpclb, filtered updates will be
+	// sent to remote LB ClientConn through this resolver.
+	manualResolver *lbManualResolver
+	// The ClientConn to talk to the remote balancer.
+	ccRemoteLB *remoteBalancerCCWrapper
+	// backoff for calling remote balancer.
+	backoff backoff.Strategy
+
+	// Support client side load reporting. Each picker gets a reference to this,
+	// and will update its content.
+	clientStats *rpcStats
+
+	mu sync.Mutex // guards everything following.
+	// The full server list including drops, used to check if the newly received
+	// serverList contains anything new. Each generated picker will also keep a
+	// reference to this list to do the first layer pick.
+	fullServerList []*lbpb.Server
+	// Backend addresses. It's kept so the addresses are available when
+	// switching between round_robin and pickfirst.
+	backendAddrs []resolver.Address
+	// All backend addresses, with metadata set to nil. This list contains all
+	// backend addresses in the same order and with the same duplicates as in
+	// the serverlist. When generating a picker, a SubConn slice with the same
+	// order but with only READY SCs will be generated.
+	backendAddrsWithoutMetadata []resolver.Address
+	// Roundrobin functionalities.
+	state    connectivity.State
+	subConns map[resolver.Address]balancer.SubConn   // Used to new/remove SubConn.
+	scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns.
+	picker   balancer.Picker
+	// Support fallback to resolved backend addresses if there's no response
+	// from remote balancer within fallbackTimeout.
+	remoteBalancerConnected bool
+	serverListReceived      bool
+	inFallback              bool
+	// resolvedBackendAddrs is resolvedAddrs minus remote balancers. It's set
+	// when resolved address updates are received, and read in the goroutine
+	// handling fallback.
+	resolvedBackendAddrs []resolver.Address
+	connErr              error // the last connection error
+}
+
+// regeneratePicker takes a snapshot of the balancer, and generates a picker from
+// it. The picker
+// - always returns ErrTransientFailure if the balancer is in TransientFailure,
+// - does a two-layer roundrobin pick otherwise.
+//
+// Caller must hold lb.mu.
+func (lb *lbBalancer) regeneratePicker(resetDrop bool) {
+	if lb.state == connectivity.TransientFailure {
+		lb.picker = &errPicker{err: fmt.Errorf("all SubConns are in TransientFailure, last connection error: %v", lb.connErr)}
+		return
+	}
+
+	if lb.state == connectivity.Connecting {
+		lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable}
+		return
+	}
+
+	var readySCs []balancer.SubConn
+	if lb.usePickFirst {
+		for _, sc := range lb.subConns {
+			readySCs = append(readySCs, sc)
+			break
+		}
+	} else {
+		for _, a := range lb.backendAddrsWithoutMetadata {
+			if sc, ok := lb.subConns[a]; ok {
+				if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready {
+					readySCs = append(readySCs, sc)
+				}
+			}
+		}
+	}
+
+	if len(readySCs) <= 0 {
+		// If there are no ready SubConns, always re-pick. This is to avoid drops
+		// unless at least one SubConn is ready. Otherwise we may drop more
+		// often than wanted because of drops + re-picks (which become re-drops).
+		//
+		// This doesn't seem to be necessary after the connecting check above.
+		// Kept for safety.
+		lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable}
+		return
+	}
+	if lb.inFallback {
+		lb.picker = newRRPicker(readySCs)
+		return
+	}
+	if resetDrop {
+		lb.picker = newLBPicker(lb.fullServerList, readySCs, lb.clientStats)
+		return
+	}
+	prevLBPicker, ok := lb.picker.(*lbPicker)
+	if !ok {
+		lb.picker = newLBPicker(lb.fullServerList, readySCs, lb.clientStats)
+		return
+	}
+	prevLBPicker.updateReadySCs(readySCs)
+}
+
+// aggregateSubConnStates calculates the aggregated state of SubConns in
+// lb.subConns. These SubConns are the subconns in use (when switching between
+// fallback and grpclb). lb.scStates contains states for all SubConns, including
+// those in cache (SubConns are cached for 10 seconds after remove).
+//
+// The aggregated state is:
+// - If at least one SubConn is in Ready, the aggregated state is Ready;
+// - Else if at least one SubConn is in Connecting or IDLE, the aggregated state is Connecting;
+//   - It's OK to consider IDLE as Connecting. SubConns never stay in IDLE,
+//     they start to connect immediately. But there's a race between when the
+//     overall state is reported and when the new SubConn state arrives. And
+//     SubConns never go back to IDLE.
+// - Else the aggregated state is TransientFailure.
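+//
+// For example: with lb.scStates = {sc1: Connecting, sc2: TransientFailure},
+// the aggregate is Connecting; once sc1 reports Ready, it becomes Ready.
+// (sc1 and sc2 are hypothetical SubConns.)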
+func (lb *lbBalancer) aggregateSubConnStates() connectivity.State { + var numConnecting uint64 + + for _, sc := range lb.subConns { + if state, ok := lb.scStates[sc]; ok { + switch state { + case connectivity.Ready: + return connectivity.Ready + case connectivity.Connecting, connectivity.Idle: + numConnecting++ + } + } + } + if numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} + +func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { + s := scs.ConnectivityState + if logger.V(2) { + logger.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s) + } + lb.mu.Lock() + defer lb.mu.Unlock() + + oldS, ok := lb.scStates[sc] + if !ok { + if logger.V(2) { + logger.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + } + return + } + lb.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scStates. Remove state for this sc here. + delete(lb.scStates, sc) + case connectivity.TransientFailure: + lb.connErr = scs.ConnectionError + } + // Force regenerate picker if + // - this sc became ready from not-ready + // - this sc became not-ready from ready + lb.updateStateAndPicker((oldS == connectivity.Ready) != (s == connectivity.Ready), false) + + // Enter fallback when the aggregated state is not Ready and the connection + // to remote balancer is lost. + if lb.state != connectivity.Ready { + if !lb.inFallback && !lb.remoteBalancerConnected { + // Enter fallback. + lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) + } + } +} + +// updateStateAndPicker re-calculate the aggregated state, and regenerate picker +// if overall state is changed. +// +// If forceRegeneratePicker is true, picker will be regenerated. +func (lb *lbBalancer) updateStateAndPicker(forceRegeneratePicker bool, resetDrop bool) { + oldAggrState := lb.state + lb.state = lb.aggregateSubConnStates() + // Regenerate picker when one of the following happens: + // - caller wants to regenerate + // - the aggregated state changed + if forceRegeneratePicker || (lb.state != oldAggrState) { + lb.regeneratePicker(resetDrop) + } + + lb.cc.UpdateState(balancer.State{ConnectivityState: lb.state, Picker: lb.picker}) +} + +// fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use +// resolved backends (backends received from resolver, not from remote balancer) +// if no connection to remote balancers was successful. +func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) { + timer := time.NewTimer(fallbackTimeout) + defer timer.Stop() + select { + case <-timer.C: + case <-lb.doneCh: + return + } + lb.mu.Lock() + if lb.inFallback || lb.serverListReceived { + lb.mu.Unlock() + return + } + // Enter fallback. + lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) + lb.mu.Unlock() +} + +func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) { + lb.mu.Lock() + defer lb.mu.Unlock() + + // grpclb uses the user's dial target to populate the `Name` field of the + // `InitialLoadBalanceRequest` message sent to the remote balancer. But when + // grpclb is used a child policy in the context of RLS, we want the `Name` + // field to be populated with the value received from the RLS server. To + // support this use case, an optional "target_name" field has been added to + // the grpclb LB policy's config. 
If specified, it overrides the name of + // the target to be sent to the remote balancer; if not, the target to be + // sent to the balancer will continue to be obtained from the target URI + // passed to the gRPC client channel. Whenever that target to be sent to the + // balancer is updated, we need to restart the stream to the balancer as + // this target is sent in the first message on the stream. + if gc != nil { + target := lb.dialTarget + if gc.ServiceName != "" { + target = gc.ServiceName + } + if target != lb.target { + lb.target = target + if lb.ccRemoteLB != nil { + lb.ccRemoteLB.cancelRemoteBalancerCall() + } + } + } + + newUsePickFirst := childIsPickFirst(gc) + if lb.usePickFirst == newUsePickFirst { + return + } + if logger.V(2) { + logger.Infof("lbBalancer: switching mode, new usePickFirst: %+v", newUsePickFirst) + } + lb.refreshSubConns(lb.backendAddrs, lb.inFallback, newUsePickFirst) +} + +func (lb *lbBalancer) ResolverError(error) { + // Ignore resolver errors. GRPCLB is not selected unless the resolver + // works at least once. +} + +func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + if logger.V(2) { + logger.Infof("lbBalancer: UpdateClientConnState: %+v", ccs) + } + gc, _ := ccs.BalancerConfig.(*grpclbServiceConfig) + lb.handleServiceConfig(gc) + + addrs := ccs.ResolverState.Addresses + + var remoteBalancerAddrs, backendAddrs []resolver.Address + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + a.Type = resolver.Backend + remoteBalancerAddrs = append(remoteBalancerAddrs, a) + } else { + backendAddrs = append(backendAddrs, a) + } + } + if sd := grpclbstate.Get(ccs.ResolverState); sd != nil { + // Override any balancer addresses provided via + // ccs.ResolverState.Addresses. + remoteBalancerAddrs = sd.BalancerAddresses + } + + if len(backendAddrs)+len(remoteBalancerAddrs) == 0 { + // There should be at least one address, either grpclb server or + // fallback. Empty address is not valid. + return balancer.ErrBadResolverState + } + + if len(remoteBalancerAddrs) == 0 { + if lb.ccRemoteLB != nil { + lb.ccRemoteLB.close() + lb.ccRemoteLB = nil + } + } else if lb.ccRemoteLB == nil { + // First time receiving resolved addresses, create a cc to remote + // balancers. + lb.newRemoteBalancerCCWrapper() + // Start the fallback goroutine. + go lb.fallbackToBackendsAfter(lb.fallbackTimeout) + } + + if lb.ccRemoteLB != nil { + // cc to remote balancers uses lb.manualResolver. Send the updated remote + // balancer addresses to it through manualResolver. + lb.manualResolver.UpdateState(resolver.State{Addresses: remoteBalancerAddrs}) + } + + lb.mu.Lock() + lb.resolvedBackendAddrs = backendAddrs + if len(remoteBalancerAddrs) == 0 || lb.inFallback { + // If there's no remote balancer address in ClientConn update, grpclb + // enters fallback mode immediately. + // + // If a new update is received while grpclb is in fallback, update the + // list of backends being used to the new fallback backends. 
+ lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) + } + lb.mu.Unlock() + return nil +} + +func (lb *lbBalancer) Close() { + select { + case <-lb.doneCh: + return + default: + } + close(lb.doneCh) + if lb.ccRemoteLB != nil { + lb.ccRemoteLB.close() + } + lb.cc.close() +} + +func (lb *lbBalancer) ExitIdle() {} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go new file mode 100644 index 00000000000..8942c31310a --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go @@ -0,0 +1,67 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclb + +import ( + "encoding/json" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/serviceconfig" +) + +const ( + roundRobinName = roundrobin.Name + pickFirstName = grpc.PickFirstBalancerName +) + +type grpclbServiceConfig struct { + serviceconfig.LoadBalancingConfig + ChildPolicy *[]map[string]json.RawMessage + ServiceName string +} + +func (b *lbBuilder) ParseConfig(lbConfig json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + ret := &grpclbServiceConfig{} + if err := json.Unmarshal(lbConfig, ret); err != nil { + return nil, err + } + return ret, nil +} + +func childIsPickFirst(sc *grpclbServiceConfig) bool { + if sc == nil { + return false + } + childConfigs := sc.ChildPolicy + if childConfigs == nil { + return false + } + for _, childC := range *childConfigs { + // If round_robin exists before pick_first, return false + if _, ok := childC[roundRobinName]; ok { + return false + } + // If pick_first is before round_robin, return true + if _, ok := childC[pickFirstName]; ok { + return true + } + } + return false +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go new file mode 100644 index 00000000000..39bc5cc71e8 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go @@ -0,0 +1,202 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpclb + +import ( + "sync" + "sync/atomic" + + "google.golang.org/grpc/balancer" + lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/status" +) + +// rpcStats is same as lbpb.ClientStats, except that numCallsDropped is a map +// instead of a slice. +type rpcStats struct { + // Only access the following fields atomically. + numCallsStarted int64 + numCallsFinished int64 + numCallsFinishedWithClientFailedToSend int64 + numCallsFinishedKnownReceived int64 + + mu sync.Mutex + // map load_balance_token -> num_calls_dropped + numCallsDropped map[string]int64 +} + +func newRPCStats() *rpcStats { + return &rpcStats{ + numCallsDropped: make(map[string]int64), + } +} + +func isZeroStats(stats *lbpb.ClientStats) bool { + return len(stats.CallsFinishedWithDrop) == 0 && + stats.NumCallsStarted == 0 && + stats.NumCallsFinished == 0 && + stats.NumCallsFinishedWithClientFailedToSend == 0 && + stats.NumCallsFinishedKnownReceived == 0 +} + +// toClientStats converts rpcStats to lbpb.ClientStats, and clears rpcStats. +func (s *rpcStats) toClientStats() *lbpb.ClientStats { + stats := &lbpb.ClientStats{ + NumCallsStarted: atomic.SwapInt64(&s.numCallsStarted, 0), + NumCallsFinished: atomic.SwapInt64(&s.numCallsFinished, 0), + NumCallsFinishedWithClientFailedToSend: atomic.SwapInt64(&s.numCallsFinishedWithClientFailedToSend, 0), + NumCallsFinishedKnownReceived: atomic.SwapInt64(&s.numCallsFinishedKnownReceived, 0), + } + s.mu.Lock() + dropped := s.numCallsDropped + s.numCallsDropped = make(map[string]int64) + s.mu.Unlock() + for token, count := range dropped { + stats.CallsFinishedWithDrop = append(stats.CallsFinishedWithDrop, &lbpb.ClientStatsPerToken{ + LoadBalanceToken: token, + NumCalls: count, + }) + } + return stats +} + +func (s *rpcStats) drop(token string) { + atomic.AddInt64(&s.numCallsStarted, 1) + s.mu.Lock() + s.numCallsDropped[token]++ + s.mu.Unlock() + atomic.AddInt64(&s.numCallsFinished, 1) +} + +func (s *rpcStats) failedToSend() { + atomic.AddInt64(&s.numCallsStarted, 1) + atomic.AddInt64(&s.numCallsFinishedWithClientFailedToSend, 1) + atomic.AddInt64(&s.numCallsFinished, 1) +} + +func (s *rpcStats) knownReceived() { + atomic.AddInt64(&s.numCallsStarted, 1) + atomic.AddInt64(&s.numCallsFinishedKnownReceived, 1) + atomic.AddInt64(&s.numCallsFinished, 1) +} + +type errPicker struct { + // Pick always returns this err. + err error +} + +func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + return balancer.PickResult{}, p.err +} + +// rrPicker does roundrobin on subConns. It's typically used when there's no +// response from remote balancer, and grpclb falls back to the resolved +// backends. +// +// It guaranteed that len(subConns) > 0. +type rrPicker struct { + mu sync.Mutex + subConns []balancer.SubConn // The subConns that were READY when taking the snapshot. + subConnsNext int +} + +func newRRPicker(readySCs []balancer.SubConn) *rrPicker { + return &rrPicker{ + subConns: readySCs, + subConnsNext: grpcrand.Intn(len(readySCs)), + } +} + +func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + p.mu.Lock() + defer p.mu.Unlock() + sc := p.subConns[p.subConnsNext] + p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns) + return balancer.PickResult{SubConn: sc}, nil +} + +// lbPicker does two layers of picks: +// +// First layer: roundrobin on all servers in serverList, including drops and backends. 
+// - If it picks a drop, the RPC will fail as being dropped. +// - If it picks a backend, do a second layer pick to pick the real backend. +// +// Second layer: roundrobin on all READY backends. +// +// It's guaranteed that len(serverList) > 0. +type lbPicker struct { + mu sync.Mutex + serverList []*lbpb.Server + serverListNext int + subConns []balancer.SubConn // The subConns that were READY when taking the snapshot. + subConnsNext int + + stats *rpcStats +} + +func newLBPicker(serverList []*lbpb.Server, readySCs []balancer.SubConn, stats *rpcStats) *lbPicker { + return &lbPicker{ + serverList: serverList, + subConns: readySCs, + subConnsNext: grpcrand.Intn(len(readySCs)), + stats: stats, + } +} + +func (p *lbPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + p.mu.Lock() + defer p.mu.Unlock() + + // Layer one roundrobin on serverList. + s := p.serverList[p.serverListNext] + p.serverListNext = (p.serverListNext + 1) % len(p.serverList) + + // If it's a drop, return an error and fail the RPC. + if s.Drop { + p.stats.drop(s.LoadBalanceToken) + return balancer.PickResult{}, status.Errorf(codes.Unavailable, "request dropped by grpclb") + } + + // If not a drop but there's no ready subConns. + if len(p.subConns) <= 0 { + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } + + // Return the next ready subConn in the list, also collect rpc stats. + sc := p.subConns[p.subConnsNext] + p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns) + done := func(info balancer.DoneInfo) { + if !info.BytesSent { + p.stats.failedToSend() + } else if info.BytesReceived { + p.stats.knownReceived() + } + } + return balancer.PickResult{SubConn: sc, Done: done}, nil +} + +func (p *lbPicker) updateReadySCs(readySCs []balancer.SubConn) { + p.mu.Lock() + defer p.mu.Unlock() + + p.subConns = readySCs + p.subConnsNext = p.subConnsNext % len(readySCs) +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go new file mode 100644 index 00000000000..dab1959418e --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go @@ -0,0 +1,449 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpclb + +import ( + "context" + "fmt" + "io" + "net" + "sync" + "time" + + "github.com/golang/protobuf/proto" + timestamppb "github.com/golang/protobuf/ptypes/timestamp" + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/backoff" + imetadata "google.golang.org/grpc/internal/metadata" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +// processServerList updates balancer's internal state, create/remove SubConns +// and regenerates picker using the received serverList. +func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { + if logger.V(2) { + logger.Infof("lbBalancer: processing server list: %+v", l) + } + lb.mu.Lock() + defer lb.mu.Unlock() + + // Set serverListReceived to true so fallback will not take effect if it has + // not hit timeout. + lb.serverListReceived = true + + // If the new server list == old server list, do nothing. + if cmp.Equal(lb.fullServerList, l.Servers, cmp.Comparer(proto.Equal)) { + if logger.V(2) { + logger.Infof("lbBalancer: new serverlist same as the previous one, ignoring") + } + return + } + lb.fullServerList = l.Servers + + var backendAddrs []resolver.Address + for i, s := range l.Servers { + if s.Drop { + continue + } + + md := metadata.Pairs(lbTokenKey, s.LoadBalanceToken) + ip := net.IP(s.IpAddress) + ipStr := ip.String() + if ip.To4() == nil { + // Add square brackets to ipv6 addresses, otherwise net.Dial() and + // net.SplitHostPort() will return too many colons error. + ipStr = fmt.Sprintf("[%s]", ipStr) + } + addr := imetadata.Set(resolver.Address{Addr: fmt.Sprintf("%s:%d", ipStr, s.Port)}, md) + if logger.V(2) { + logger.Infof("lbBalancer: server list entry[%d]: ipStr:|%s|, port:|%d|, load balancer token:|%v|", + i, ipStr, s.Port, s.LoadBalanceToken) + } + backendAddrs = append(backendAddrs, addr) + } + + // Call refreshSubConns to create/remove SubConns. If we are in fallback, + // this is also exiting fallback. + lb.refreshSubConns(backendAddrs, false, lb.usePickFirst) +} + +// refreshSubConns creates/removes SubConns with backendAddrs, and refreshes +// balancer state and picker. +// +// Caller must hold lb.mu. +func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback bool, pickFirst bool) { + opts := balancer.NewSubConnOptions{} + if !fallback { + opts.CredsBundle = lb.grpclbBackendCreds + } + + lb.backendAddrs = backendAddrs + lb.backendAddrsWithoutMetadata = nil + + fallbackModeChanged := lb.inFallback != fallback + lb.inFallback = fallback + if fallbackModeChanged && lb.inFallback { + // Clear previous received list when entering fallback, so if the server + // comes back and sends the same list again, the new addresses will be + // used. + lb.fullServerList = nil + } + + balancingPolicyChanged := lb.usePickFirst != pickFirst + oldUsePickFirst := lb.usePickFirst + lb.usePickFirst = pickFirst + + if fallbackModeChanged || balancingPolicyChanged { + // Remove all SubConns when switching balancing policy or switching + // fallback mode. + // + // For fallback mode switching with pickfirst, we want to recreate the + // SubConn because the creds could be different. 
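+		// The loop below tears down every existing SubConn; replacements are
+		// created later in this function from backendAddrs, using creds that
+		// match the new mode.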
+ for a, sc := range lb.subConns { + if oldUsePickFirst { + // If old SubConn were created for pickfirst, bypass cache and + // remove directly. + lb.cc.cc.RemoveSubConn(sc) + } else { + lb.cc.RemoveSubConn(sc) + } + delete(lb.subConns, a) + } + } + + if lb.usePickFirst { + var ( + scKey resolver.Address + sc balancer.SubConn + ) + for scKey, sc = range lb.subConns { + break + } + if sc != nil { + if len(backendAddrs) == 0 { + lb.cc.cc.RemoveSubConn(sc) + delete(lb.subConns, scKey) + return + } + lb.cc.cc.UpdateAddresses(sc, backendAddrs) + sc.Connect() + return + } + // This bypasses the cc wrapper with SubConn cache. + sc, err := lb.cc.cc.NewSubConn(backendAddrs, opts) + if err != nil { + logger.Warningf("grpclb: failed to create new SubConn: %v", err) + return + } + sc.Connect() + lb.subConns[backendAddrs[0]] = sc + lb.scStates[sc] = connectivity.Idle + return + } + + // addrsSet is the set converted from backendAddrsWithoutMetadata, it's used to quick + // lookup for an address. + addrsSet := make(map[resolver.Address]struct{}) + // Create new SubConns. + for _, addr := range backendAddrs { + addrWithoutAttrs := addr + addrWithoutAttrs.Attributes = nil + addrsSet[addrWithoutAttrs] = struct{}{} + lb.backendAddrsWithoutMetadata = append(lb.backendAddrsWithoutMetadata, addrWithoutAttrs) + + if _, ok := lb.subConns[addrWithoutAttrs]; !ok { + // Use addrWithMD to create the SubConn. + sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, opts) + if err != nil { + logger.Warningf("grpclb: failed to create new SubConn: %v", err) + continue + } + lb.subConns[addrWithoutAttrs] = sc // Use the addr without MD as key for the map. + if _, ok := lb.scStates[sc]; !ok { + // Only set state of new sc to IDLE. The state could already be + // READY for cached SubConns. + lb.scStates[sc] = connectivity.Idle + } + sc.Connect() + } + } + + for a, sc := range lb.subConns { + // a was removed by resolver. + if _, ok := addrsSet[a]; !ok { + lb.cc.RemoveSubConn(sc) + delete(lb.subConns, a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in UpdateSubConnState. + } + } + + // Regenerate and update picker after refreshing subconns because with + // cache, even if SubConn was newed/removed, there might be no state + // changes (the subconn will be kept in cache, not actually + // newed/removed). + lb.updateStateAndPicker(true, true) +} + +type remoteBalancerCCWrapper struct { + cc *grpc.ClientConn + lb *lbBalancer + backoff backoff.Strategy + done chan struct{} + + streamMu sync.Mutex + streamCancel func() + + // waitgroup to wait for all goroutines to exit. + wg sync.WaitGroup +} + +func (lb *lbBalancer) newRemoteBalancerCCWrapper() { + var dopts []grpc.DialOption + if creds := lb.opt.DialCreds; creds != nil { + dopts = append(dopts, grpc.WithTransportCredentials(creds)) + } else if bundle := lb.grpclbClientConnCreds; bundle != nil { + dopts = append(dopts, grpc.WithCredentialsBundle(bundle)) + } else { + dopts = append(dopts, grpc.WithTransportCredentials(insecure.NewCredentials())) + } + if lb.opt.Dialer != nil { + dopts = append(dopts, grpc.WithContextDialer(lb.opt.Dialer)) + } + if lb.opt.CustomUserAgent != "" { + dopts = append(dopts, grpc.WithUserAgent(lb.opt.CustomUserAgent)) + } + // Explicitly set pickfirst as the balancer. 
+ dopts = append(dopts, grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"pick_first"}`)) + dopts = append(dopts, grpc.WithResolvers(lb.manualResolver)) + dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) + + // Enable Keepalive for grpclb client. + dopts = append(dopts, grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: 20 * time.Second, + Timeout: 10 * time.Second, + PermitWithoutStream: true, + })) + + // The dial target is not important. + // + // The grpclb server addresses will set field ServerName, and creds will + // receive ServerName as authority. + cc, err := grpc.DialContext(context.Background(), lb.manualResolver.Scheme()+":///grpclb.subClientConn", dopts...) + if err != nil { + logger.Fatalf("failed to dial: %v", err) + } + ccw := &remoteBalancerCCWrapper{ + cc: cc, + lb: lb, + backoff: lb.backoff, + done: make(chan struct{}), + } + lb.ccRemoteLB = ccw + ccw.wg.Add(1) + go ccw.watchRemoteBalancer() +} + +// close closed the ClientConn to remote balancer, and waits until all +// goroutines to finish. +func (ccw *remoteBalancerCCWrapper) close() { + close(ccw.done) + ccw.cc.Close() + ccw.wg.Wait() +} + +func (ccw *remoteBalancerCCWrapper) readServerList(s *balanceLoadClientStream) error { + for { + reply, err := s.Recv() + if err != nil { + if err == io.EOF { + return errServerTerminatedConnection + } + return fmt.Errorf("grpclb: failed to recv server list: %v", err) + } + if serverList := reply.GetServerList(); serverList != nil { + ccw.lb.processServerList(serverList) + } + if reply.GetFallbackResponse() != nil { + // Eagerly enter fallback + ccw.lb.mu.Lock() + ccw.lb.refreshSubConns(ccw.lb.resolvedBackendAddrs, true, ccw.lb.usePickFirst) + ccw.lb.mu.Unlock() + } + } +} + +func (ccw *remoteBalancerCCWrapper) sendLoadReport(s *balanceLoadClientStream, interval time.Duration) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + lastZero := false + for { + select { + case <-ticker.C: + case <-s.Context().Done(): + return + } + stats := ccw.lb.clientStats.toClientStats() + zero := isZeroStats(stats) + if zero && lastZero { + // Quash redundant empty load reports. + continue + } + lastZero = zero + t := time.Now() + stats.Timestamp = ×tamppb.Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), + } + if err := s.Send(&lbpb.LoadBalanceRequest{ + LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{ + ClientStats: stats, + }, + }); err != nil { + return + } + } +} + +func (ccw *remoteBalancerCCWrapper) callRemoteBalancer(ctx context.Context) (backoff bool, _ error) { + lbClient := &loadBalancerClient{cc: ccw.cc} + stream, err := lbClient.BalanceLoad(ctx, grpc.WaitForReady(true)) + if err != nil { + return true, fmt.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err) + } + ccw.lb.mu.Lock() + ccw.lb.remoteBalancerConnected = true + ccw.lb.mu.Unlock() + + // grpclb handshake on the stream. 
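+	// The first message on the stream must be an InitialLoadBalanceRequest
+	// carrying the target name; the balancer is expected to reply with an
+	// InitialLoadBalanceResponse before it starts streaming server lists.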
+ initReq := &lbpb.LoadBalanceRequest{ + LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{ + InitialRequest: &lbpb.InitialLoadBalanceRequest{ + Name: ccw.lb.target, + }, + }, + } + if err := stream.Send(initReq); err != nil { + return true, fmt.Errorf("grpclb: failed to send init request: %v", err) + } + reply, err := stream.Recv() + if err != nil { + return true, fmt.Errorf("grpclb: failed to recv init response: %v", err) + } + initResp := reply.GetInitialResponse() + if initResp == nil { + return true, fmt.Errorf("grpclb: reply from remote balancer did not include initial response") + } + + ccw.wg.Add(1) + go func() { + defer ccw.wg.Done() + if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 { + ccw.sendLoadReport(stream, d) + } + }() + // No backoff if init req/resp handshake was successful. + return false, ccw.readServerList(stream) +} + +// cancelRemoteBalancerCall cancels the context used by the stream to the remote +// balancer. watchRemoteBalancer() takes care of restarting this call after the +// stream fails. +func (ccw *remoteBalancerCCWrapper) cancelRemoteBalancerCall() { + ccw.streamMu.Lock() + if ccw.streamCancel != nil { + ccw.streamCancel() + ccw.streamCancel = nil + } + ccw.streamMu.Unlock() +} + +func (ccw *remoteBalancerCCWrapper) watchRemoteBalancer() { + defer func() { + ccw.wg.Done() + ccw.streamMu.Lock() + if ccw.streamCancel != nil { + // This is to make sure that we don't leak the context when we are + // directly returning from inside of the below `for` loop. + ccw.streamCancel() + ccw.streamCancel = nil + } + ccw.streamMu.Unlock() + }() + + var retryCount int + var ctx context.Context + for { + ccw.streamMu.Lock() + if ccw.streamCancel != nil { + ccw.streamCancel() + ccw.streamCancel = nil + } + ctx, ccw.streamCancel = context.WithCancel(context.Background()) + ccw.streamMu.Unlock() + + doBackoff, err := ccw.callRemoteBalancer(ctx) + select { + case <-ccw.done: + return + default: + if err != nil { + if err == errServerTerminatedConnection { + logger.Info(err) + } else { + logger.Warning(err) + } + } + } + // Trigger a re-resolve when the stream errors. + ccw.lb.cc.cc.ResolveNow(resolver.ResolveNowOptions{}) + + ccw.lb.mu.Lock() + ccw.lb.remoteBalancerConnected = false + ccw.lb.fullServerList = nil + // Enter fallback when connection to remote balancer is lost, and the + // aggregated state is not Ready. + if !ccw.lb.inFallback && ccw.lb.state != connectivity.Ready { + // Entering fallback. + ccw.lb.refreshSubConns(ccw.lb.resolvedBackendAddrs, true, ccw.lb.usePickFirst) + } + ccw.lb.mu.Unlock() + + if !doBackoff { + retryCount = 0 + continue + } + + timer := time.NewTimer(ccw.backoff.Backoff(retryCount)) // Copy backoff + select { + case <-timer.C: + case <-ccw.done: + timer.Stop() + return + } + retryCount++ + } +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go new file mode 100644 index 00000000000..373f04b98d3 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go @@ -0,0 +1,208 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclb + +import ( + "fmt" + "sync" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// The parent ClientConn should re-resolve when grpclb loses connection to the +// remote balancer. When the ClientConn inside grpclb gets a TransientFailure, +// it calls lbManualResolver.ResolveNow(), which calls parent ClientConn's +// ResolveNow, and eventually results in re-resolve happening in parent +// ClientConn's resolver (DNS for example). +// +// parent +// ClientConn +// +-----------------------------------------------------------------+ +// | parent +---------------------------------+ | +// | DNS ClientConn | grpclb | | +// | resolver balancerWrapper | | | +// | + + | grpclb grpclb | | +// | | | | ManualResolver ClientConn | | +// | | | | + + | | +// | | | | | | Transient | | +// | | | | | | Failure | | +// | | | | | <--------- | | | +// | | | <--------------- | ResolveNow | | | +// | | <--------- | ResolveNow | | | | | +// | | ResolveNow | | | | | | +// | | | | | | | | +// | + + | + + | | +// | +---------------------------------+ | +// +-----------------------------------------------------------------+ + +// lbManualResolver is used by the ClientConn inside grpclb. It's a manual +// resolver with a special ResolveNow() function. +// +// When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn, +// so when grpclb client lose contact with remote balancers, the parent +// ClientConn's resolver will re-resolve. +type lbManualResolver struct { + scheme string + ccr resolver.ClientConn + + ccb balancer.ClientConn +} + +func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { + r.ccr = cc + return r, nil +} + +func (r *lbManualResolver) Scheme() string { + return r.scheme +} + +// ResolveNow calls resolveNow on the parent ClientConn. +func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOptions) { + r.ccb.ResolveNow(o) +} + +// Close is a noop for Resolver. +func (*lbManualResolver) Close() {} + +// UpdateState calls cc.UpdateState. +func (r *lbManualResolver) UpdateState(s resolver.State) { + r.ccr.UpdateState(s) +} + +const subConnCacheTime = time.Second * 10 + +// lbCacheClientConn is a wrapper balancer.ClientConn with a SubConn cache. +// SubConns will be kept in cache for subConnCacheTime before being removed. +// +// Its new and remove methods are updated to do cache first. +type lbCacheClientConn struct { + cc balancer.ClientConn + timeout time.Duration + + mu sync.Mutex + // subConnCache only keeps subConns that are being deleted. 
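+	// A SubConn stays cached for up to subConnCacheTime after RemoveSubConn;
+	// a NewSubConn for the same address within that window revives the cached
+	// entry instead of creating a fresh connection.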
+ subConnCache map[resolver.Address]*subConnCacheEntry + subConnToAddr map[balancer.SubConn]resolver.Address +} + +type subConnCacheEntry struct { + sc balancer.SubConn + + cancel func() + abortDeleting bool +} + +func newLBCacheClientConn(cc balancer.ClientConn) *lbCacheClientConn { + return &lbCacheClientConn{ + cc: cc, + timeout: subConnCacheTime, + subConnCache: make(map[resolver.Address]*subConnCacheEntry), + subConnToAddr: make(map[balancer.SubConn]resolver.Address), + } +} + +func (ccc *lbCacheClientConn) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + if len(addrs) != 1 { + return nil, fmt.Errorf("grpclb calling NewSubConn with addrs of length %v", len(addrs)) + } + addrWithoutAttrs := addrs[0] + addrWithoutAttrs.Attributes = nil + + ccc.mu.Lock() + defer ccc.mu.Unlock() + if entry, ok := ccc.subConnCache[addrWithoutAttrs]; ok { + // If entry is in subConnCache, the SubConn was being deleted. + // cancel function will never be nil. + entry.cancel() + delete(ccc.subConnCache, addrWithoutAttrs) + return entry.sc, nil + } + + scNew, err := ccc.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + + ccc.subConnToAddr[scNew] = addrWithoutAttrs + return scNew, nil +} + +func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) { + ccc.mu.Lock() + defer ccc.mu.Unlock() + addr, ok := ccc.subConnToAddr[sc] + if !ok { + return + } + + if entry, ok := ccc.subConnCache[addr]; ok { + if entry.sc != sc { + // This could happen if NewSubConn was called multiple times for the + // same address, and those SubConns are all removed. We remove sc + // immediately here. + delete(ccc.subConnToAddr, sc) + ccc.cc.RemoveSubConn(sc) + } + return + } + + entry := &subConnCacheEntry{ + sc: sc, + } + ccc.subConnCache[addr] = entry + + timer := time.AfterFunc(ccc.timeout, func() { + ccc.mu.Lock() + defer ccc.mu.Unlock() + if entry.abortDeleting { + return + } + ccc.cc.RemoveSubConn(sc) + delete(ccc.subConnToAddr, sc) + delete(ccc.subConnCache, addr) + }) + entry.cancel = func() { + if !timer.Stop() { + // If stop was not successful, the timer has fired (this can only + // happen in a race). But the deleting function is blocked on ccc.mu + // because the mutex was held by the caller of this function. + // + // Set abortDeleting to true to abort the deleting function. When + // the lock is released, the deleting function will acquire the + // lock, check the value of abortDeleting and return. + entry.abortDeleting = true + } + } +} + +func (ccc *lbCacheClientConn) UpdateState(s balancer.State) { + ccc.cc.UpdateState(s) +} + +func (ccc *lbCacheClientConn) close() { + ccc.mu.Lock() + // Only cancel all existing timers. There's no need to remove SubConns. + for _, entry := range ccc.subConnCache { + entry.cancel() + } + ccc.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/alts.go b/vendor/google.golang.org/grpc/credentials/alts/alts.go new file mode 100644 index 00000000000..579adf210c4 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/alts.go @@ -0,0 +1,332 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package alts implements the ALTS credential support by gRPC library, which +// encapsulates all the state needed by a client to authenticate with a server +// using ALTS and make various assertions, e.g., about the client's identity, +// role, or whether it is authorized to make a particular call. +// This package is experimental. +package alts + +import ( + "context" + "errors" + "fmt" + "net" + "sync" + "time" + + "google.golang.org/grpc/credentials" + core "google.golang.org/grpc/credentials/alts/internal" + "google.golang.org/grpc/credentials/alts/internal/handshaker" + "google.golang.org/grpc/credentials/alts/internal/handshaker/service" + altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/googlecloud" +) + +const ( + // hypervisorHandshakerServiceAddress represents the default ALTS gRPC + // handshaker service address in the hypervisor. + hypervisorHandshakerServiceAddress = "metadata.google.internal.:8080" + // defaultTimeout specifies the server handshake timeout. + defaultTimeout = 30.0 * time.Second + // The following constants specify the minimum and maximum acceptable + // protocol versions. + protocolVersionMaxMajor = 2 + protocolVersionMaxMinor = 1 + protocolVersionMinMajor = 2 + protocolVersionMinMinor = 1 +) + +var ( + vmOnGCP bool + once sync.Once + maxRPCVersion = &altspb.RpcProtocolVersions_Version{ + Major: protocolVersionMaxMajor, + Minor: protocolVersionMaxMinor, + } + minRPCVersion = &altspb.RpcProtocolVersions_Version{ + Major: protocolVersionMinMajor, + Minor: protocolVersionMinMinor, + } + // ErrUntrustedPlatform is returned from ClientHandshake and + // ServerHandshake is running on a platform where the trustworthiness of + // the handshaker service is not guaranteed. + ErrUntrustedPlatform = errors.New("ALTS: untrusted platform. ALTS is only supported on GCP") + logger = grpclog.Component("alts") +) + +// AuthInfo exposes security information from the ALTS handshake to the +// application. This interface is to be implemented by ALTS. Users should not +// need a brand new implementation of this interface. For situations like +// testing, any new implementation should embed this interface. This allows +// ALTS to add new methods to this interface. +type AuthInfo interface { + // ApplicationProtocol returns application protocol negotiated for the + // ALTS connection. + ApplicationProtocol() string + // RecordProtocol returns the record protocol negotiated for the ALTS + // connection. + RecordProtocol() string + // SecurityLevel returns the security level of the created ALTS secure + // channel. + SecurityLevel() altspb.SecurityLevel + // PeerServiceAccount returns the peer service account. + PeerServiceAccount() string + // LocalServiceAccount returns the local service account. + LocalServiceAccount() string + // PeerRPCVersions returns the RPC version supported by the peer. + PeerRPCVersions() *altspb.RpcProtocolVersions +} + +// ClientOptions contains the client-side options of an ALTS channel. 
These
+// options will be passed to the underlying ALTS handshaker.
+type ClientOptions struct {
+	// TargetServiceAccounts contains a list of expected target service
+	// accounts.
+	TargetServiceAccounts []string
+	// HandshakerServiceAddress represents the ALTS handshaker gRPC service
+	// address to connect to.
+	HandshakerServiceAddress string
+}
+
+// DefaultClientOptions creates a new ClientOptions object with the default
+// values.
+func DefaultClientOptions() *ClientOptions {
+	return &ClientOptions{
+		HandshakerServiceAddress: hypervisorHandshakerServiceAddress,
+	}
+}
+
+// ServerOptions contains the server-side options of an ALTS channel. These
+// options will be passed to the underlying ALTS handshaker.
+type ServerOptions struct {
+	// HandshakerServiceAddress represents the ALTS handshaker gRPC service
+	// address to connect to.
+	HandshakerServiceAddress string
+}
+
+// DefaultServerOptions creates a new ServerOptions object with the default
+// values.
+func DefaultServerOptions() *ServerOptions {
+	return &ServerOptions{
+		HandshakerServiceAddress: hypervisorHandshakerServiceAddress,
+	}
+}
+
+// altsTC is the credentials required for authenticating a connection using
+// ALTS. It implements the credentials.TransportCredentials interface.
+type altsTC struct {
+	info      *credentials.ProtocolInfo
+	side      core.Side
+	accounts  []string
+	hsAddress string
+}
+
+// NewClientCreds constructs a client-side ALTS TransportCredentials object.
+func NewClientCreds(opts *ClientOptions) credentials.TransportCredentials {
+	return newALTS(core.ClientSide, opts.TargetServiceAccounts, opts.HandshakerServiceAddress)
+}
+
+// NewServerCreds constructs a server-side ALTS TransportCredentials object.
+func NewServerCreds(opts *ServerOptions) credentials.TransportCredentials {
+	return newALTS(core.ServerSide, nil, opts.HandshakerServiceAddress)
+}
+
+func newALTS(side core.Side, accounts []string, hsAddress string) credentials.TransportCredentials {
+	once.Do(func() {
+		vmOnGCP = googlecloud.OnGCE()
+	})
+	if hsAddress == "" {
+		hsAddress = hypervisorHandshakerServiceAddress
+	}
+	return &altsTC{
+		info: &credentials.ProtocolInfo{
+			SecurityProtocol: "alts",
+			SecurityVersion:  "1.0",
+		},
+		side:      side,
+		accounts:  accounts,
+		hsAddress: hsAddress,
+	}
+}
+
+// ClientHandshake implements the client side handshake protocol.
+func (g *altsTC) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ credentials.AuthInfo, err error) {
+	if !vmOnGCP {
+		return nil, nil, ErrUntrustedPlatform
+	}
+
+	// Connecting to ALTS handshaker service.
+	hsConn, err := service.Dial(g.hsAddress)
+	if err != nil {
+		return nil, nil, err
+	}
+	// Do not close hsConn since it is shared with other handshakes.
+
+	// Possible context leak:
+	// The cancel function for the child context we create will only be
+	// called when a non-nil error is returned.
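+
+	// A usage sketch from the application side (illustrative only; the
+	// dial target below is a placeholder, and error handling is elided):
+	//
+	//	creds := alts.NewClientCreds(alts.DefaultClientOptions())
+	//	cc, err := grpc.Dial("myservice.example.com:443",
+	//		grpc.WithTransportCredentials(creds))
+	//	if err != nil {
+	//		// handle dial error
+	//	}
+	//	defer cc.Close()
+	//
+	// The server side mirrors this with NewServerCreds and grpc.Creds:
+	//
+	//	srv := grpc.NewServer(grpc.Creds(alts.NewServerCreds(alts.DefaultServerOptions())))
+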
+	var cancel context.CancelFunc
+	ctx, cancel = context.WithCancel(ctx)
+	defer func() {
+		if err != nil {
+			cancel()
+		}
+	}()
+
+	opts := handshaker.DefaultClientHandshakerOptions()
+	opts.TargetName = addr
+	opts.TargetServiceAccounts = g.accounts
+	opts.RPCVersions = &altspb.RpcProtocolVersions{
+		MaxRpcVersion: maxRPCVersion,
+		MinRpcVersion: minRPCVersion,
+	}
+	chs, err := handshaker.NewClientHandshaker(ctx, hsConn, rawConn, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer func() {
+		if err != nil {
+			chs.Close()
+		}
+	}()
+	secConn, authInfo, err := chs.ClientHandshake(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+	altsAuthInfo, ok := authInfo.(AuthInfo)
+	if !ok {
+		return nil, nil, errors.New("client-side auth info is not of type alts.AuthInfo")
+	}
+	match, _ := checkRPCVersions(opts.RPCVersions, altsAuthInfo.PeerRPCVersions())
+	if !match {
+		return nil, nil, fmt.Errorf("server-side RPC versions are not compatible with this client, local versions: %v, peer versions: %v", opts.RPCVersions, altsAuthInfo.PeerRPCVersions())
+	}
+	return secConn, authInfo, nil
+}
+
+// ServerHandshake implements the server side ALTS handshaker.
+func (g *altsTC) ServerHandshake(rawConn net.Conn) (_ net.Conn, _ credentials.AuthInfo, err error) {
+	if !vmOnGCP {
+		return nil, nil, ErrUntrustedPlatform
+	}
+	// Connecting to ALTS handshaker service.
+	hsConn, err := service.Dial(g.hsAddress)
+	if err != nil {
+		return nil, nil, err
+	}
+	// Do not close hsConn since it's shared with other handshakes.
+
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout)
+	defer cancel()
+	opts := handshaker.DefaultServerHandshakerOptions()
+	opts.RPCVersions = &altspb.RpcProtocolVersions{
+		MaxRpcVersion: maxRPCVersion,
+		MinRpcVersion: minRPCVersion,
+	}
+	shs, err := handshaker.NewServerHandshaker(ctx, hsConn, rawConn, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer func() {
+		if err != nil {
+			shs.Close()
+		}
+	}()
+	secConn, authInfo, err := shs.ServerHandshake(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+	altsAuthInfo, ok := authInfo.(AuthInfo)
+	if !ok {
+		return nil, nil, errors.New("server-side auth info is not of type alts.AuthInfo")
+	}
+	match, _ := checkRPCVersions(opts.RPCVersions, altsAuthInfo.PeerRPCVersions())
+	if !match {
+		return nil, nil, fmt.Errorf("client-side RPC versions are not compatible with this server, local versions: %v, peer versions: %v", opts.RPCVersions, altsAuthInfo.PeerRPCVersions())
+	}
+	return secConn, authInfo, nil
+}
+
+func (g *altsTC) Info() credentials.ProtocolInfo {
+	return *g.info
+}
+
+func (g *altsTC) Clone() credentials.TransportCredentials {
+	info := *g.info
+	var accounts []string
+	if g.accounts != nil {
+		accounts = make([]string, len(g.accounts))
+		copy(accounts, g.accounts)
+	}
+	return &altsTC{
+		info:      &info,
+		side:      g.side,
+		hsAddress: g.hsAddress,
+		accounts:  accounts,
+	}
+}
+
+func (g *altsTC) OverrideServerName(serverNameOverride string) error {
+	g.info.ServerName = serverNameOverride
+	return nil
+}
+
+// compareRPCVersions returns 0 if v1 == v2, 1 if v1 > v2 and -1 if v1 < v2.
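+// For example, treating versions as (Major, Minor) pairs:
+//
+//	compareRPCVersions((2,1), (2,0)) ==  1
+//	compareRPCVersions((2,1), (2,1)) ==  0
+//	compareRPCVersions((2,0), (2,1)) == -1
+//
+// checkRPCVersions below builds on this: a local range [min (2,1), max (2,1)]
+// and a peer range [min (2,0), max (2,1)] intersect, so the check passes and
+// (2,1) is returned as the highest common version.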
+func compareRPCVersions(v1, v2 *altspb.RpcProtocolVersions_Version) int {
+	switch {
+	case v1.GetMajor() > v2.GetMajor(),
+		v1.GetMajor() == v2.GetMajor() && v1.GetMinor() > v2.GetMinor():
+		return 1
+	case v1.GetMajor() < v2.GetMajor(),
+		v1.GetMajor() == v2.GetMajor() && v1.GetMinor() < v2.GetMinor():
+		return -1
+	}
+	return 0
+}
+
+// checkRPCVersions performs a version check between local and peer RPC
+// protocol versions. This function returns true if the check passes, which
+// means both parties agreed on a common RPC protocol to use, and false
+// otherwise. The function also returns the highest common RPC protocol
+// version both parties agreed on.
+func checkRPCVersions(local, peer *altspb.RpcProtocolVersions) (bool, *altspb.RpcProtocolVersions_Version) {
+	if local == nil || peer == nil {
+		logger.Error("invalid checkRPCVersions argument, either local or peer is nil.")
+		return false, nil
+	}
+
+	// maxCommonVersion is MIN(local.max, peer.max).
+	maxCommonVersion := local.GetMaxRpcVersion()
+	if compareRPCVersions(local.GetMaxRpcVersion(), peer.GetMaxRpcVersion()) > 0 {
+		maxCommonVersion = peer.GetMaxRpcVersion()
+	}
+
+	// minCommonVersion is MAX(local.min, peer.min).
+	minCommonVersion := peer.GetMinRpcVersion()
+	if compareRPCVersions(local.GetMinRpcVersion(), peer.GetMinRpcVersion()) > 0 {
+		minCommonVersion = local.GetMinRpcVersion()
+	}
+
+	if compareRPCVersions(maxCommonVersion, minCommonVersion) < 0 {
+		return false, nil
+	}
+	return true, maxCommonVersion
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go b/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go
new file mode 100644
index 00000000000..ebea57da1de
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go
@@ -0,0 +1,95 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package authinfo provides authentication information returned by
+// handshakers.
+package authinfo
+
+import (
+	"google.golang.org/grpc/credentials"
+	altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
+)
+
+var _ credentials.AuthInfo = (*altsAuthInfo)(nil)
+
+// altsAuthInfo exposes security information from the ALTS handshake to the
+// application. altsAuthInfo is immutable and implements credentials.AuthInfo.
+type altsAuthInfo struct {
+	p *altspb.AltsContext
+	credentials.CommonAuthInfo
+}
+
+// New returns a new altsAuthInfo object given handshaker results.
+func New(result *altspb.HandshakerResult) credentials.AuthInfo {
+	return newAuthInfo(result)
+}
+
+func newAuthInfo(result *altspb.HandshakerResult) *altsAuthInfo {
+	return &altsAuthInfo{
+		p: &altspb.AltsContext{
+			ApplicationProtocol: result.GetApplicationProtocol(),
+			RecordProtocol:      result.GetRecordProtocol(),
+			// TODO: assign security level from result.
+ SecurityLevel: altspb.SecurityLevel_INTEGRITY_AND_PRIVACY, + PeerServiceAccount: result.GetPeerIdentity().GetServiceAccount(), + LocalServiceAccount: result.GetLocalIdentity().GetServiceAccount(), + PeerRpcVersions: result.GetPeerRpcVersions(), + PeerAttributes: result.GetPeerIdentity().GetAttributes(), + }, + CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}, + } +} + +// AuthType identifies the context as providing ALTS authentication information. +func (s *altsAuthInfo) AuthType() string { + return "alts" +} + +// ApplicationProtocol returns the context's application protocol. +func (s *altsAuthInfo) ApplicationProtocol() string { + return s.p.GetApplicationProtocol() +} + +// RecordProtocol returns the context's record protocol. +func (s *altsAuthInfo) RecordProtocol() string { + return s.p.GetRecordProtocol() +} + +// SecurityLevel returns the context's security level. +func (s *altsAuthInfo) SecurityLevel() altspb.SecurityLevel { + return s.p.GetSecurityLevel() +} + +// PeerServiceAccount returns the context's peer service account. +func (s *altsAuthInfo) PeerServiceAccount() string { + return s.p.GetPeerServiceAccount() +} + +// LocalServiceAccount returns the context's local service account. +func (s *altsAuthInfo) LocalServiceAccount() string { + return s.p.GetLocalServiceAccount() +} + +// PeerRPCVersions returns the context's peer RPC versions. +func (s *altsAuthInfo) PeerRPCVersions() *altspb.RpcProtocolVersions { + return s.p.GetPeerRpcVersions() +} + +// PeerAttributes returns the context's peer attributes. +func (s *altsAuthInfo) PeerAttributes() map[string]string { + return s.p.GetPeerAttributes() +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/common.go b/vendor/google.golang.org/grpc/credentials/alts/internal/common.go new file mode 100644 index 00000000000..3896e8cf2b5 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/common.go @@ -0,0 +1,67 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains common core functionality for ALTS. +package internal + +import ( + "context" + "net" + + "google.golang.org/grpc/credentials" +) + +const ( + // ClientSide identifies the client in this communication. + ClientSide Side = iota + // ServerSide identifies the server in this communication. + ServerSide +) + +// PeerNotRespondingError is returned when a peer server is not responding +// after a channel has been established. It is treated as a temporary connection +// error and re-connection to the server should be attempted. +var PeerNotRespondingError = &peerNotRespondingError{} + +// Side identifies the party's role: client or server. +type Side int + +type peerNotRespondingError struct{} + +// Return an error message for the purpose of logging. +func (e *peerNotRespondingError) Error() string { + return "peer server is not responding and re-connection should be attempted." 
+}
+
+// Temporary indicates if this connection error is temporary or fatal.
+func (e *peerNotRespondingError) Temporary() bool {
+	return true
+}
+
+// Handshaker defines an ALTS handshaker interface.
+type Handshaker interface {
+	// ClientHandshake starts and completes a client-side handshaking and
+	// returns a secure connection and corresponding auth information.
+	ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error)
+	// ServerHandshake starts and completes a server-side handshaking and
+	// returns a secure connection and corresponding auth information.
+	ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error)
+	// Close terminates the Handshaker. It should be called when the caller
+	// obtains the secure connection.
+	Close()
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go
new file mode 100644
index 00000000000..43726e877b8
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go
@@ -0,0 +1,131 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package conn
+
+import (
+	"bytes"
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/binary"
+	"fmt"
+	"strconv"
+)
+
+// rekeyAEAD holds the necessary information for an AEAD based on
+// AES-GCM that performs nonce-based key derivation and XORs the
+// nonce with a random mask.
+type rekeyAEAD struct {
+	kdfKey     []byte
+	kdfCounter []byte
+	nonceMask  []byte
+	nonceBuf   []byte
+	gcmAEAD    cipher.AEAD
+}
+
+// KeySizeError signals that the given key does not have the correct size.
+type KeySizeError int
+
+func (k KeySizeError) Error() string {
+	return "alts/conn: invalid key size " + strconv.Itoa(int(k))
+}
+
+// newRekeyAEAD creates a new instance of aes128gcm with rekeying.
+// The key argument should be 44 bytes; the first 32 bytes are used as a key
+// for HKDF-expand and the remaining 12 bytes are used as a random mask for
+// the counter.
+func newRekeyAEAD(key []byte) (*rekeyAEAD, error) {
+	k := len(key)
+	if k != kdfKeyLen+nonceLen {
+		return nil, KeySizeError(k)
+	}
+	return &rekeyAEAD{
+		kdfKey:     key[:kdfKeyLen],
+		kdfCounter: make([]byte, kdfCounterLen),
+		nonceMask:  key[kdfKeyLen:],
+		nonceBuf:   make([]byte, nonceLen),
+		gcmAEAD:    nil,
+	}, nil
+}
+
+// Seal rekeys if nonce[2:8] is different than in the last call, masks the nonce,
+// and calls Seal for aes128gcm.
+func (s *rekeyAEAD) Seal(dst, nonce, plaintext, additionalData []byte) []byte {
+	if err := s.rekeyIfRequired(nonce); err != nil {
+		panic(fmt.Sprintf("Rekeying failed with: %s", err.Error()))
+	}
+	maskNonce(s.nonceBuf, nonce, s.nonceMask)
+	return s.gcmAEAD.Seal(dst, s.nonceBuf, plaintext, additionalData)
+}
+
+// Open rekeys if nonce[2:8] is different than in the last call, masks the nonce,
+// and calls Open for aes128gcm.
+func (s *rekeyAEAD) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if err := s.rekeyIfRequired(nonce); err != nil { + return nil, err + } + maskNonce(s.nonceBuf, nonce, s.nonceMask) + return s.gcmAEAD.Open(dst, s.nonceBuf, ciphertext, additionalData) +} + +// rekeyIfRequired creates a new aes128gcm AEAD if the existing AEAD is nil +// or cannot be used with given nonce. +func (s *rekeyAEAD) rekeyIfRequired(nonce []byte) error { + newKdfCounter := nonce[kdfCounterOffset : kdfCounterOffset+kdfCounterLen] + if s.gcmAEAD != nil && bytes.Equal(newKdfCounter, s.kdfCounter) { + return nil + } + copy(s.kdfCounter, newKdfCounter) + a, err := aes.NewCipher(hkdfExpand(s.kdfKey, s.kdfCounter)) + if err != nil { + return err + } + s.gcmAEAD, err = cipher.NewGCM(a) + return err +} + +// maskNonce XORs the given nonce with the mask and stores the result in dst. +func maskNonce(dst, nonce, mask []byte) { + nonce1 := binary.LittleEndian.Uint64(nonce[:sizeUint64]) + nonce2 := binary.LittleEndian.Uint32(nonce[sizeUint64:]) + mask1 := binary.LittleEndian.Uint64(mask[:sizeUint64]) + mask2 := binary.LittleEndian.Uint32(mask[sizeUint64:]) + binary.LittleEndian.PutUint64(dst[:sizeUint64], nonce1^mask1) + binary.LittleEndian.PutUint32(dst[sizeUint64:], nonce2^mask2) +} + +// NonceSize returns the required nonce size. +func (s *rekeyAEAD) NonceSize() int { + return s.gcmAEAD.NonceSize() +} + +// Overhead returns the ciphertext overhead. +func (s *rekeyAEAD) Overhead() int { + return s.gcmAEAD.Overhead() +} + +// hkdfExpand computes the first 16 bytes of the HKDF-expand function +// defined in RFC5869. +func hkdfExpand(key, info []byte) []byte { + mac := hmac.New(sha256.New, key) + mac.Write(info) + mac.Write([]byte{0x01}[:]) + return mac.Sum(nil)[:aeadKeyLen] +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go new file mode 100644 index 00000000000..04e0adb6c90 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go @@ -0,0 +1,105 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conn + +import ( + "crypto/aes" + "crypto/cipher" + + core "google.golang.org/grpc/credentials/alts/internal" +) + +const ( + // Overflow length n in bytes, never encrypt more than 2^(n*8) frames (in + // each direction). + overflowLenAES128GCM = 5 +) + +// aes128gcm is the struct that holds necessary information for ALTS record. +// The counter value is NOT included in the payload during the encryption and +// decryption operations. +type aes128gcm struct { + // inCounter is used in ALTS record to check that incoming counters are + // as expected, since ALTS record guarantees that messages are unwrapped + // in the same order that the peer wrapped them. + inCounter Counter + outCounter Counter + aead cipher.AEAD +} + +// NewAES128GCM creates an instance that uses aes128gcm for ALTS record. 
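+// A minimal usage sketch (illustrative only; the zero-valued 16-byte key
+// below is a placeholder, never a real key):
+//
+//	key := make([]byte, 16) // AES-128 key size
+//	crypto, err := NewAES128GCM(core.ClientSide, key)
+//	if err != nil {
+//		// handle error
+//	}
+//	frame, err := crypto.Encrypt(nil, []byte("payload")) // ciphertext || tag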
+func NewAES128GCM(side core.Side, key []byte) (ALTSRecordCrypto, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + a, err := cipher.NewGCM(c) + if err != nil { + return nil, err + } + return &aes128gcm{ + inCounter: NewInCounter(side, overflowLenAES128GCM), + outCounter: NewOutCounter(side, overflowLenAES128GCM), + aead: a, + }, nil +} + +// Encrypt is the encryption function. dst can contain bytes at the beginning of +// the ciphertext that will not be encrypted but will be authenticated. If dst +// has enough capacity to hold these bytes, the ciphertext and the tag, no +// allocation and copy operations will be performed. dst and plaintext do not +// overlap. +func (s *aes128gcm) Encrypt(dst, plaintext []byte) ([]byte, error) { + // If we need to allocate an output buffer, we want to include space for + // GCM tag to avoid forcing ALTS record to reallocate as well. + dlen := len(dst) + dst, out := SliceForAppend(dst, len(plaintext)+GcmTagSize) + seq, err := s.outCounter.Value() + if err != nil { + return nil, err + } + data := out[:len(plaintext)] + copy(data, plaintext) // data may alias plaintext + + // Seal appends the ciphertext and the tag to its first argument and + // returns the updated slice. However, SliceForAppend above ensures that + // dst has enough capacity to avoid a reallocation and copy due to the + // append. + dst = s.aead.Seal(dst[:dlen], seq, data, nil) + s.outCounter.Inc() + return dst, nil +} + +func (s *aes128gcm) EncryptionOverhead() int { + return GcmTagSize +} + +func (s *aes128gcm) Decrypt(dst, ciphertext []byte) ([]byte, error) { + seq, err := s.inCounter.Value() + if err != nil { + return nil, err + } + // If dst is equal to ciphertext[:0], ciphertext storage is reused. + plaintext, err := s.aead.Open(dst, seq, ciphertext, nil) + if err != nil { + return nil, ErrAuth + } + s.inCounter.Inc() + return plaintext, nil +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go new file mode 100644 index 00000000000..6a9035ea254 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go @@ -0,0 +1,116 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conn + +import ( + "crypto/cipher" + + core "google.golang.org/grpc/credentials/alts/internal" +) + +const ( + // Overflow length n in bytes, never encrypt more than 2^(n*8) frames (in + // each direction). + overflowLenAES128GCMRekey = 8 + nonceLen = 12 + aeadKeyLen = 16 + kdfKeyLen = 32 + kdfCounterOffset = 2 + kdfCounterLen = 6 + sizeUint64 = 8 +) + +// aes128gcmRekey is the struct that holds necessary information for ALTS record. +// The counter value is NOT included in the payload during the encryption and +// decryption operations. 
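+// Because the KDF counter is taken from nonce bytes [2, 8) (kdfCounterOffset
+// and kdfCounterLen below) and the little-endian record counter increments
+// from byte 0, a fresh AES-GCM key is derived once the two low-order bytes
+// wrap, i.e. roughly every 2^16 records, while the 8-byte overflow window
+// permits up to 2^64 records per direction.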
+type aes128gcmRekey struct {
+	// inCounter is used in ALTS record to check that incoming counters are
+	// as expected, since ALTS record guarantees that messages are unwrapped
+	// in the same order that the peer wrapped them.
+	inCounter  Counter
+	outCounter Counter
+	inAEAD     cipher.AEAD
+	outAEAD    cipher.AEAD
+}
+
+// NewAES128GCMRekey creates an instance that uses aes128gcm with rekeying
+// for ALTS record. The key argument should be 44 bytes; the first 32 bytes
+// are used as a key for HKDF-expand and the remaining 12 bytes are used
+// as a random mask for the counter.
+func NewAES128GCMRekey(side core.Side, key []byte) (ALTSRecordCrypto, error) {
+	inCounter := NewInCounter(side, overflowLenAES128GCMRekey)
+	outCounter := NewOutCounter(side, overflowLenAES128GCMRekey)
+	inAEAD, err := newRekeyAEAD(key)
+	if err != nil {
+		return nil, err
+	}
+	outAEAD, err := newRekeyAEAD(key)
+	if err != nil {
+		return nil, err
+	}
+	return &aes128gcmRekey{
+		inCounter,
+		outCounter,
+		inAEAD,
+		outAEAD,
+	}, nil
+}
+
+// Encrypt is the encryption function. dst can contain bytes at the beginning of
+// the ciphertext that will not be encrypted but will be authenticated. If dst
+// has enough capacity to hold these bytes, the ciphertext and the tag, no
+// allocation and copy operations will be performed. dst and plaintext do not
+// overlap.
+func (s *aes128gcmRekey) Encrypt(dst, plaintext []byte) ([]byte, error) {
+	// If we need to allocate an output buffer, we want to include space for
+	// GCM tag to avoid forcing ALTS record to reallocate as well.
+	dlen := len(dst)
+	dst, out := SliceForAppend(dst, len(plaintext)+GcmTagSize)
+	seq, err := s.outCounter.Value()
+	if err != nil {
+		return nil, err
+	}
+	data := out[:len(plaintext)]
+	copy(data, plaintext) // data may alias plaintext
+
+	// Seal appends the ciphertext and the tag to its first argument and
+	// returns the updated slice. However, SliceForAppend above ensures that
+	// dst has enough capacity to avoid a reallocation and copy due to the
+	// append.
+	dst = s.outAEAD.Seal(dst[:dlen], seq, data, nil)
+	s.outCounter.Inc()
+	return dst, nil
+}
+
+func (s *aes128gcmRekey) EncryptionOverhead() int {
+	return GcmTagSize
+}
+
+func (s *aes128gcmRekey) Decrypt(dst, ciphertext []byte) ([]byte, error) {
+	seq, err := s.inCounter.Value()
+	if err != nil {
+		return nil, err
+	}
+	plaintext, err := s.inAEAD.Open(dst, seq, ciphertext, nil)
+	if err != nil {
+		return nil, ErrAuth
+	}
+	s.inCounter.Inc()
+	return plaintext, nil
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go
new file mode 100644
index 00000000000..1795d0c9e37
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go
@@ -0,0 +1,70 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package conn
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+)
+
+const (
+	// GcmTagSize is the GCM tag size, i.e., the difference in length
+	// between plaintext and ciphertext. From crypto/cipher/gcm.go in the
+	// Go crypto library.
+	GcmTagSize = 16
+)
+
+// ErrAuth occurs on authentication failure.
+var ErrAuth = errors.New("message authentication failed")
+
+// SliceForAppend takes a slice and a requested number of bytes. It returns a
+// slice with the contents of the given slice followed by that many bytes and a
+// second slice that aliases into it and contains only the extra bytes. If the
+// original slice has sufficient capacity then no allocation is performed.
+func SliceForAppend(in []byte, n int) (head, tail []byte) {
+	if total := len(in) + n; cap(in) >= total {
+		head = in[:total]
+	} else {
+		head = make([]byte, total)
+		copy(head, in)
+	}
+	tail = head[len(in):]
+	return head, tail
+}
+
+// ParseFramedMsg parses the provided buffer and returns a frame of the format
+// msgLength+msg and any remaining bytes in that buffer.
+func ParseFramedMsg(b []byte, maxLen uint32) ([]byte, []byte, error) {
+	// If the size field is not complete, return the provided buffer as
+	// remaining buffer.
+	if len(b) < MsgLenFieldSize {
+		return nil, b, nil
+	}
+	msgLenField := b[:MsgLenFieldSize]
+	length := binary.LittleEndian.Uint32(msgLenField)
+	if length > maxLen {
+		return nil, nil, fmt.Errorf("received the frame length %d larger than the limit %d", length, maxLen)
+	}
+	if len(b) < int(length)+4 { // account for the first 4 msg length bytes.
+		// Frame is not complete yet.
+		return nil, b, nil
+	}
+	return b[:MsgLenFieldSize+length], b[MsgLenFieldSize+length:], nil
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go
new file mode 100644
index 00000000000..9f00aca0b61
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go
@@ -0,0 +1,62 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package conn
+
+import (
+	"errors"
+)
+
+const counterLen = 12
+
+var (
+	errInvalidCounter = errors.New("invalid counter")
+)
+
+// Counter is a 96-bit, little-endian counter.
+type Counter struct {
+	value       [counterLen]byte
+	invalid     bool
+	overflowLen int
+}
+
+// Value returns the current value of the counter as a byte slice.
+func (c *Counter) Value() ([]byte, error) {
+	if c.invalid {
+		return nil, errInvalidCounter
+	}
+	return c.value[:], nil
+}
+
+// Inc increments the counter and checks for overflow.
+func (c *Counter) Inc() {
+	// If the counter is already invalid, there is no need to increase it.
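+	// Otherwise the increment is little-endian: byte 0 is incremented
+	// first and any carry propagates upward. For example, with
+	// overflowLen = 2:
+	//
+	//	value = {0xff, 0x00, ...} -> Inc -> {0x00, 0x01, ...}
+	//	value = {0xff, 0xff, ...} -> Inc -> overflow, counter invalidated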
+	if c.invalid {
+		return
+	}
+	i := 0
+	for ; i < c.overflowLen; i++ {
+		c.value[i]++
+		if c.value[i] != 0 {
+			break
+		}
+	}
+	if i == c.overflowLen {
+		c.invalid = true
+	}
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go
new file mode 100644
index 00000000000..0d64fb37a12
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go
@@ -0,0 +1,275 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package conn contains an implementation of a secure channel created by gRPC
+// handshakers.
+package conn
+
+import (
+	"encoding/binary"
+	"fmt"
+	"math"
+	"net"
+
+	core "google.golang.org/grpc/credentials/alts/internal"
+)
+
+// ALTSRecordCrypto is the interface for gRPC ALTS record protocol.
+type ALTSRecordCrypto interface {
+	// Encrypt encrypts the plaintext and computes the tag (if any) of dst
+	// and plaintext. dst and plaintext may fully overlap or not at all.
+	Encrypt(dst, plaintext []byte) ([]byte, error)
+	// EncryptionOverhead returns the tag size (if any) in bytes.
+	EncryptionOverhead() int
+	// Decrypt decrypts ciphertext and verifies the tag (if any). dst and
+	// ciphertext may alias exactly or not at all. To reuse ciphertext's
+	// storage for the decrypted output, use ciphertext[:0] as dst.
+	Decrypt(dst, ciphertext []byte) ([]byte, error)
+}
+
+// ALTSRecordFunc is a function type for factory functions that create
+// ALTSRecordCrypto instances.
+type ALTSRecordFunc func(s core.Side, keyData []byte) (ALTSRecordCrypto, error)
+
+const (
+	// MsgLenFieldSize is the byte size of the frame length field of a
+	// framed message.
+	MsgLenFieldSize = 4
+	// The byte size of the message type field of a framed message.
+	msgTypeFieldSize = 4
+	// The byte size limit for an ALTS record message.
+	altsRecordLengthLimit = 1024 * 1024 // 1 MiB
+	// The default byte size of an ALTS record message.
+	altsRecordDefaultLength = 4 * 1024 // 4KiB
+	// Message type value included in ALTS record framing.
+	altsRecordMsgType = uint32(0x06)
+	// The initial write buffer size.
+	altsWriteBufferInitialSize = 32 * 1024 // 32KiB
+	// The maximum write buffer size. This *must* be multiple of
+	// altsRecordDefaultLength.
+	altsWriteBufferMaxSize = 512 * 1024 // 512KiB
+)
+
+var (
+	protocols = make(map[string]ALTSRecordFunc)
+)
+
+// RegisterProtocol registers an ALTS record encryption protocol.
+func RegisterProtocol(protocol string, f ALTSRecordFunc) error {
+	if _, ok := protocols[protocol]; ok {
+		return fmt.Errorf("protocol %v is already registered", protocol)
+	}
+	protocols[protocol] = f
+	return nil
+}
+
+// conn represents a secured connection. It implements the net.Conn interface.
+type conn struct {
+	net.Conn
+	crypto ALTSRecordCrypto
+	// buf holds data that has been read from the connection and decrypted,
+	// but has not yet been returned by Read.
+	buf                []byte
+	payloadLengthLimit int
+	// protected holds data read from the network but has not yet been
+	// decrypted. This data might not compose a complete frame.
+	protected []byte
+	// writeBuf is a buffer used to contain encrypted frames before being
+	// written to the network.
+	writeBuf []byte
+	// nextFrame stores the next frame (in protected buffer) info.
+	nextFrame []byte
+	// overhead is the calculated overhead of each frame.
+	overhead int
+}
+
+// NewConn creates a new secure channel instance given the other party role and
+// handshaking result.
+func NewConn(c net.Conn, side core.Side, recordProtocol string, key []byte, protected []byte) (net.Conn, error) {
+	newCrypto := protocols[recordProtocol]
+	if newCrypto == nil {
+		return nil, fmt.Errorf("negotiated unknown next_protocol %q", recordProtocol)
+	}
+	crypto, err := newCrypto(side, key)
+	if err != nil {
+		return nil, fmt.Errorf("protocol %q: %v", recordProtocol, err)
+	}
+	overhead := MsgLenFieldSize + msgTypeFieldSize + crypto.EncryptionOverhead()
+	payloadLengthLimit := altsRecordDefaultLength - overhead
+	var protectedBuf []byte
+	if protected == nil {
+		// We pre-allocate protected to be of size
+		// 2*altsRecordDefaultLength-1 during initialization. We only
+		// read from the network into protected when protected does not
+		// contain a complete frame, which is at most
+		// altsRecordDefaultLength-1 (bytes). And we read at most
+		// altsRecordDefaultLength (bytes) data into protected at one
+		// time. Therefore, 2*altsRecordDefaultLength-1 is large enough
+		// to buffer data read from the network.
+		protectedBuf = make([]byte, 0, 2*altsRecordDefaultLength-1)
+	} else {
+		protectedBuf = make([]byte, len(protected))
+		copy(protectedBuf, protected)
+	}
+
+	altsConn := &conn{
+		Conn:               c,
+		crypto:             crypto,
+		payloadLengthLimit: payloadLengthLimit,
+		protected:          protectedBuf,
+		writeBuf:           make([]byte, altsWriteBufferInitialSize),
+		nextFrame:          protectedBuf,
+		overhead:           overhead,
+	}
+	return altsConn, nil
+}
+
+// Read reads and decrypts a frame from the underlying connection, and copies the
+// decrypted payload into b. If the size of the payload is greater than len(b),
+// Read retains the remaining bytes in an internal buffer, and subsequent calls
+// to Read will read from this buffer until it is exhausted.
+func (p *conn) Read(b []byte) (n int, err error) {
+	if len(p.buf) == 0 {
+		var framedMsg []byte
+		framedMsg, p.nextFrame, err = ParseFramedMsg(p.nextFrame, altsRecordLengthLimit)
+		if err != nil {
+			return n, err
+		}
+		// Check whether the next frame to be decrypted has been
+		// completely received yet.
+		if len(framedMsg) == 0 {
+			copy(p.protected, p.nextFrame)
+			p.protected = p.protected[:len(p.nextFrame)]
+			// Always copy next incomplete frame to the beginning of
+			// the protected buffer and reset nextFrame to it.
+			p.nextFrame = p.protected
+		}
+		// Check whether a complete frame has been received yet.
+		for len(framedMsg) == 0 {
+			if len(p.protected) == cap(p.protected) {
+				tmp := make([]byte, len(p.protected), cap(p.protected)+altsRecordDefaultLength)
+				copy(tmp, p.protected)
+				p.protected = tmp
+			}
+			n, err = p.Conn.Read(p.protected[len(p.protected):min(cap(p.protected), len(p.protected)+altsRecordDefaultLength)])
+			if err != nil {
+				return 0, err
+			}
+			p.protected = p.protected[:len(p.protected)+n]
+			framedMsg, p.nextFrame, err = ParseFramedMsg(p.protected, altsRecordLengthLimit)
+			if err != nil {
+				return 0, err
+			}
+		}
+		// Now we have a complete frame, decrypt it.
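+		// Per the constants above, the complete frame is laid out as:
+		//
+		//	| length (4B, LE) | type (4B, LE) | ciphertext ... GCM tag |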
+ msg := framedMsg[MsgLenFieldSize:] + msgType := binary.LittleEndian.Uint32(msg[:msgTypeFieldSize]) + if msgType&0xff != altsRecordMsgType { + return 0, fmt.Errorf("received frame with incorrect message type %v, expected lower byte %v", + msgType, altsRecordMsgType) + } + ciphertext := msg[msgTypeFieldSize:] + + // Decrypt requires that if the dst and ciphertext alias, they + // must alias exactly. Code here used to use msg[:0], but msg + // starts MsgLenFieldSize+msgTypeFieldSize bytes earlier than + // ciphertext, so they alias inexactly. Using ciphertext[:0] + // arranges the appropriate aliasing without needing to copy + // ciphertext or use a separate destination buffer. For more info + // check: https://golang.org/pkg/crypto/cipher/#AEAD. + p.buf, err = p.crypto.Decrypt(ciphertext[:0], ciphertext) + if err != nil { + return 0, err + } + } + + n = copy(b, p.buf) + p.buf = p.buf[n:] + return n, nil +} + +// Write encrypts, frames, and writes bytes from b to the underlying connection. +func (p *conn) Write(b []byte) (n int, err error) { + n = len(b) + // Calculate the output buffer size with framing and encryption overhead. + numOfFrames := int(math.Ceil(float64(len(b)) / float64(p.payloadLengthLimit))) + size := len(b) + numOfFrames*p.overhead + // If writeBuf is too small, increase its size up to the maximum size. + partialBSize := len(b) + if size > altsWriteBufferMaxSize { + size = altsWriteBufferMaxSize + const numOfFramesInMaxWriteBuf = altsWriteBufferMaxSize / altsRecordDefaultLength + partialBSize = numOfFramesInMaxWriteBuf * p.payloadLengthLimit + } + if len(p.writeBuf) < size { + p.writeBuf = make([]byte, size) + } + + for partialBStart := 0; partialBStart < len(b); partialBStart += partialBSize { + partialBEnd := partialBStart + partialBSize + if partialBEnd > len(b) { + partialBEnd = len(b) + } + partialB := b[partialBStart:partialBEnd] + writeBufIndex := 0 + for len(partialB) > 0 { + payloadLen := len(partialB) + if payloadLen > p.payloadLengthLimit { + payloadLen = p.payloadLengthLimit + } + buf := partialB[:payloadLen] + partialB = partialB[payloadLen:] + + // Write buffer contains: length, type, payload, and tag + // if any. + + // 1. Fill in type field. + msg := p.writeBuf[writeBufIndex+MsgLenFieldSize:] + binary.LittleEndian.PutUint32(msg, altsRecordMsgType) + + // 2. Encrypt the payload and create a tag if any. + msg, err = p.crypto.Encrypt(msg[:msgTypeFieldSize], buf) + if err != nil { + return n, err + } + + // 3. Fill in the size field. + binary.LittleEndian.PutUint32(p.writeBuf[writeBufIndex:], uint32(len(msg))) + + // 4. Increase writeBufIndex. + writeBufIndex += len(buf) + p.overhead + } + nn, err := p.Conn.Write(p.writeBuf[:writeBufIndex]) + if err != nil { + // We need to calculate the actual data size that was + // written. This means we need to remove header, + // encryption overheads, and any partially-written + // frame data. + numOfWrittenFrames := int(math.Floor(float64(nn) / float64(altsRecordDefaultLength))) + return partialBStart + numOfWrittenFrames*p.payloadLengthLimit, err + } + } + return n, nil +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go new file mode 100644 index 00000000000..84821fa2543 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2018 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conn + +import core "google.golang.org/grpc/credentials/alts/internal" + +// NewOutCounter returns an outgoing counter initialized to the starting sequence +// number for the client/server side of a connection. +func NewOutCounter(s core.Side, overflowLen int) (c Counter) { + c.overflowLen = overflowLen + if s == core.ServerSide { + // Server counters in ALTS record have the little-endian high bit + // set. + c.value[counterLen-1] = 0x80 + } + return +} + +// NewInCounter returns an incoming counter initialized to the starting sequence +// number for the client/server side of a connection. This is used in ALTS record +// to check that incoming counters are as expected, since ALTS record guarantees +// that messages are unwrapped in the same order that the peer wrapped them. +func NewInCounter(s core.Side, overflowLen int) (c Counter) { + c.overflowLen = overflowLen + if s == core.ClientSide { + // Server counters in ALTS record have the little-endian high bit + // set. + c.value[counterLen-1] = 0x80 + } + return +} + +// CounterFromValue creates a new counter given an initial value. +func CounterFromValue(value []byte, overflowLen int) (c Counter) { + c.overflowLen = overflowLen + copy(c.value[:], value) + return +} + +// CounterSide returns the connection side (client/server) a sequence counter is +// associated with. +func CounterSide(c []byte) core.Side { + if c[counterLen-1]&0x80 == 0x80 { + return core.ServerSide + } + return core.ClientSide +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go new file mode 100644 index 00000000000..7b953a520e5 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go @@ -0,0 +1,375 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package handshaker provides ALTS handshaking functionality for GCP. 
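+//
+// A rough sketch of the exchange implemented below, driven over a
+// bidirectional DoHandshake stream:
+//
+//	StartClientHandshakeReq or StartServerHandshakeReq -> handshaker service
+//	out_frames <-> peer (over the raw connection)
+//	NextHandshakeMessageReq -> handshaker service (repeated)
+//	HandshakerResult <- handshaker service (handshake complete)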
+package handshaker
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"sync"
+
+	grpc "google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	core "google.golang.org/grpc/credentials/alts/internal"
+	"google.golang.org/grpc/credentials/alts/internal/authinfo"
+	"google.golang.org/grpc/credentials/alts/internal/conn"
+	altsgrpc "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
+	altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
+)
+
+const (
+	// The maximum byte size of receive frames.
+	frameLimit              = 64 * 1024 // 64 KB
+	rekeyRecordProtocolName = "ALTSRP_GCM_AES128_REKEY"
+	// maxPendingHandshakes represents the maximum number of concurrent
+	// handshakes.
+	maxPendingHandshakes = 100
)
+
+var (
+	hsProtocol      = altspb.HandshakeProtocol_ALTS
+	appProtocols    = []string{"grpc"}
+	recordProtocols = []string{rekeyRecordProtocolName}
+	keyLength       = map[string]int{
+		rekeyRecordProtocolName: 44,
+	}
+	altsRecordFuncs = map[string]conn.ALTSRecordFunc{
+		// ALTS handshaker protocols.
+		rekeyRecordProtocolName: func(s core.Side, keyData []byte) (conn.ALTSRecordCrypto, error) {
+			return conn.NewAES128GCMRekey(s, keyData)
+		},
+	}
+	// mu controls the number of concurrently created (but not closed)
+	// handshakers.
+	mu                   sync.Mutex
+	concurrentHandshakes = int64(0)
+	// errDropped occurs when maxPendingHandshakes is reached.
+	errDropped = errors.New("maximum number of concurrent ALTS handshakes is reached")
+	// errOutOfBound occurs when the handshake service returns a consumed
+	// bytes value larger than the buffer that was passed to it originally.
+	errOutOfBound = errors.New("handshaker service consumed bytes value is out-of-bound")
+)
+
+func init() {
+	for protocol, f := range altsRecordFuncs {
+		if err := conn.RegisterProtocol(protocol, f); err != nil {
+			panic(err)
+		}
+	}
+}
+
+func acquire() bool {
+	mu.Lock()
+	// If we need n to be configurable, we can pass it as an argument.
+	n := int64(1)
+	success := maxPendingHandshakes-concurrentHandshakes >= n
+	if success {
+		concurrentHandshakes += n
+	}
+	mu.Unlock()
+	return success
+}
+
+func release() {
+	mu.Lock()
+	// If we need n to be configurable, we can pass it as an argument.
+	n := int64(1)
+	concurrentHandshakes -= n
+	if concurrentHandshakes < 0 {
+		mu.Unlock()
+		panic("bad release")
+	}
+	mu.Unlock()
+}
+
+// ClientHandshakerOptions contains the client handshaker options that can be
+// provided by the caller.
+type ClientHandshakerOptions struct {
+	// ClientIdentity is the handshaker client local identity.
+	ClientIdentity *altspb.Identity
+	// TargetName is the server service account name for secure name
+	// checking.
+	TargetName string
+	// TargetServiceAccounts contains a list of expected target service
+	// accounts. One of these accounts should match one of the accounts in
+	// the handshaker results. Otherwise, the handshake fails.
+	TargetServiceAccounts []string
+	// RPCVersions specifies the gRPC versions accepted by the client.
+	RPCVersions *altspb.RpcProtocolVersions
+}
+
+// ServerHandshakerOptions contains the server handshaker options that can be
+// provided by the caller.
+type ServerHandshakerOptions struct {
+	// RPCVersions specifies the gRPC versions accepted by the server.
+	RPCVersions *altspb.RpcProtocolVersions
+}
+
+// DefaultClientHandshakerOptions returns the default client handshaker options.
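+// Callers typically start from the defaults and fill in what they need; the
+// names below are placeholders:
+//
+//	opts := DefaultClientHandshakerOptions()
+//	opts.TargetName = "my-server"
+//	opts.TargetServiceAccounts = []string{"svc@my-project.iam.gserviceaccount.com"}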
+func DefaultClientHandshakerOptions() *ClientHandshakerOptions {
+	return &ClientHandshakerOptions{}
+}
+
+// DefaultServerHandshakerOptions returns the default server handshaker options.
+func DefaultServerHandshakerOptions() *ServerHandshakerOptions {
+	return &ServerHandshakerOptions{}
+}
+
+// TODO: add support for future local and remote endpoint in both client options
+// and server options (the server options struct does not exist now; when the
+// caller can provide endpoints, it should be created).
+
+// altsHandshaker is used to complete an ALTS handshake between client and
+// server. This handshaker talks to the ALTS handshaker service in the metadata
+// server.
+type altsHandshaker struct {
+	// RPC stream used to access the ALTS Handshaker service.
+	stream altsgrpc.HandshakerService_DoHandshakeClient
+	// the connection to the peer.
+	conn net.Conn
+	// client handshake options.
+	clientOpts *ClientHandshakerOptions
+	// server handshake options.
+	serverOpts *ServerHandshakerOptions
+	// defines the side doing the handshake, client or server.
+	side core.Side
+}
+
+// NewClientHandshaker creates an ALTS handshaker for GCP which contains an RPC
+// stub created using the passed conn and used to talk to the ALTS Handshaker
+// service in the metadata server.
+func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) {
+	stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return &altsHandshaker{
+		stream:     stream,
+		conn:       c,
+		clientOpts: opts,
+		side:       core.ClientSide,
+	}, nil
+}
+
+// NewServerHandshaker creates an ALTS handshaker for GCP which contains an RPC
+// stub created using the passed conn and used to talk to the ALTS Handshaker
+// service in the metadata server.
+func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) {
+	stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return &altsHandshaker{
+		stream:     stream,
+		conn:       c,
+		serverOpts: opts,
+		side:       core.ServerSide,
+	}, nil
+}
+
+// ClientHandshake starts and completes a client ALTS handshake for GCP. Once
+// done, ClientHandshake returns a secure connection.
+func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) {
+	if !acquire() {
+		return nil, nil, errDropped
+	}
+	defer release()
+
+	if h.side != core.ClientSide {
+		return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client handshake")
+	}
+
+	// Create target identities from the service account list.
+	targetIdentities := make([]*altspb.Identity, 0, len(h.clientOpts.TargetServiceAccounts))
+	for _, account := range h.clientOpts.TargetServiceAccounts {
+		targetIdentities = append(targetIdentities, &altspb.Identity{
+			IdentityOneof: &altspb.Identity_ServiceAccount{
+				ServiceAccount: account,
+			},
+		})
+	}
+	req := &altspb.HandshakerReq{
+		ReqOneof: &altspb.HandshakerReq_ClientStart{
+			ClientStart: &altspb.StartClientHandshakeReq{
+				HandshakeSecurityProtocol: hsProtocol,
+				ApplicationProtocols:      appProtocols,
+				RecordProtocols:           recordProtocols,
+				TargetIdentities:          targetIdentities,
+				LocalIdentity:             h.clientOpts.ClientIdentity,
+				TargetName:                h.clientOpts.TargetName,
+				RpcVersions:               h.clientOpts.RPCVersions,
+			},
+		},
+	}
+
+	conn, result, err := h.doHandshake(req)
+	if err != nil {
+		return nil, nil, err
+	}
+	authInfo := authinfo.New(result)
+	return conn, authInfo, nil
+}
+
+// ServerHandshake starts and completes a server ALTS handshake for GCP. Once
+// done, ServerHandshake returns a secure connection.
+func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) {
+	if !acquire() {
+		return nil, nil, errDropped
+	}
+	defer release()
+
+	if h.side != core.ServerSide {
+		return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server handshake")
+	}
+
+	p := make([]byte, frameLimit)
+	n, err := h.conn.Read(p)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Prepare server parameters.
+	// TODO: currently only ALTS parameters are provided. Might need to use
+	// more options in the future.
+	params := make(map[int32]*altspb.ServerHandshakeParameters)
+	params[int32(altspb.HandshakeProtocol_ALTS)] = &altspb.ServerHandshakeParameters{
+		RecordProtocols: recordProtocols,
+	}
+	req := &altspb.HandshakerReq{
+		ReqOneof: &altspb.HandshakerReq_ServerStart{
+			ServerStart: &altspb.StartServerHandshakeReq{
+				ApplicationProtocols: appProtocols,
+				HandshakeParameters:  params,
+				InBytes:              p[:n],
+				RpcVersions:          h.serverOpts.RPCVersions,
+			},
+		},
+	}
+
+	conn, result, err := h.doHandshake(req)
+	if err != nil {
+		return nil, nil, err
+	}
+	authInfo := authinfo.New(result)
+	return conn, authInfo, nil
+}
+
+func (h *altsHandshaker) doHandshake(req *altspb.HandshakerReq) (net.Conn, *altspb.HandshakerResult, error) {
+	resp, err := h.accessHandshakerService(req)
+	if err != nil {
+		return nil, nil, err
+	}
+	// Check if the returned status is an error.
+	if resp.GetStatus() != nil {
+		if got, want := resp.GetStatus().Code, uint32(codes.OK); got != want {
+			return nil, nil, fmt.Errorf("%v", resp.GetStatus().Details)
+		}
+	}
+
+	var extra []byte
+	if req.GetServerStart() != nil {
+		if resp.GetBytesConsumed() > uint32(len(req.GetServerStart().GetInBytes())) {
+			return nil, nil, errOutOfBound
+		}
+		extra = req.GetServerStart().GetInBytes()[resp.GetBytesConsumed():]
+	}
+	result, extra, err := h.processUntilDone(resp, extra)
+	if err != nil {
+		return nil, nil, err
+	}
+	// The handshaker returns a 128-byte key. It should be truncated based
+	// on the returned record protocol.
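+	// For ALTSRP_GCM_AES128_REKEY, keyLength above yields 44 bytes: a
+	// 32-byte HKDF-expand key plus a 12-byte nonce mask, so only
+	// KeyData[:44] is used here.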
+	keyLen, ok := keyLength[result.RecordProtocol]
+	if !ok {
+		return nil, nil, fmt.Errorf("unknown record protocol %v in handshaker result", result.RecordProtocol)
+	}
+	sc, err := conn.NewConn(h.conn, h.side, result.GetRecordProtocol(), result.KeyData[:keyLen], extra)
+	if err != nil {
+		return nil, nil, err
+	}
+	return sc, result, nil
+}
+
+func (h *altsHandshaker) accessHandshakerService(req *altspb.HandshakerReq) (*altspb.HandshakerResp, error) {
+	if err := h.stream.Send(req); err != nil {
+		return nil, err
+	}
+	resp, err := h.stream.Recv()
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// processUntilDone processes the handshake until the handshaker service
+// returns the results. The handshaker service takes care of frame parsing, so
+// we read whatever is received from the network and send it to the handshaker
+// service.
+func (h *altsHandshaker) processUntilDone(resp *altspb.HandshakerResp, extra []byte) (*altspb.HandshakerResult, []byte, error) {
+	for {
+		if len(resp.OutFrames) > 0 {
+			if _, err := h.conn.Write(resp.OutFrames); err != nil {
+				return nil, nil, err
+			}
+		}
+		if resp.Result != nil {
+			return resp.Result, extra, nil
+		}
+		buf := make([]byte, frameLimit)
+		n, err := h.conn.Read(buf)
+		if err != nil && err != io.EOF {
+			return nil, nil, err
+		}
+		// If there is nothing to send to the handshaker service, and
+		// nothing is received from the peer, then we are stuck.
+		// This covers the case when the peer is not responding. Note
+		// that handshaker service connection issues are caught in
+		// accessHandshakerService before we even get here.
+		if len(resp.OutFrames) == 0 && n == 0 {
+			return nil, nil, core.PeerNotRespondingError
+		}
+		// Append extra bytes from the previous interaction with the
+		// handshaker service to the current buffer read from conn.
+		p := append(extra, buf[:n]...)
+		// From here on, p and extra point to the same slice.
+		resp, err = h.accessHandshakerService(&altspb.HandshakerReq{
+			ReqOneof: &altspb.HandshakerReq_Next{
+				Next: &altspb.NextHandshakeMessageReq{
+					InBytes: p,
+				},
+			},
+		})
+		if err != nil {
+			return nil, nil, err
+		}
+		// Set extra based on the handshaker service response.
+		if resp.GetBytesConsumed() > uint32(len(p)) {
+			return nil, nil, errOutOfBound
+		}
+		extra = p[resp.GetBytesConsumed():]
+	}
+}
+
+// Close terminates the Handshaker. It should be called when the caller obtains
+// the secure connection.
+func (h *altsHandshaker) Close() {
+	h.stream.CloseSend()
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go
new file mode 100644
index 00000000000..2de2c4affda
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go
@@ -0,0 +1,60 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package service manages connections between the VM application and the ALTS
+// handshaker service.
+package service
+
+import (
+	"sync"
+
+	grpc "google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+)
+
+var (
+	// mu guards hsConnMap and hsDialer.
+	mu sync.Mutex
+	// hsConnMap represents a mapping from a hypervisor handshaker service
+	// address to a corresponding connection to a hypervisor handshaker
+	// service instance.
+	hsConnMap = make(map[string]*grpc.ClientConn)
+	// hsDialer will be reassigned in tests.
+	hsDialer = grpc.Dial
+)
+
+// Dial dials the handshake service in the hypervisor. If a connection has
+// already been established, this function returns it. Otherwise, a new
+// connection is created.
+func Dial(hsAddress string) (*grpc.ClientConn, error) {
+	mu.Lock()
+	defer mu.Unlock()
+
+	hsConn, ok := hsConnMap[hsAddress]
+	if !ok {
+		// Create a new connection to the handshaker service. Note that
+		// this connection stays open until the application is closed.
+		var err error
+		hsConn, err = hsDialer(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials()))
+		if err != nil {
+			return nil, err
+		}
+		hsConnMap[hsAddress] = hsConn
+	}
+	return hsConn, nil
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go
new file mode 100644
index 00000000000..703b48da753
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go
@@ -0,0 +1,264 @@
+// Copyright 2018 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The canonical version of this proto can be found at
+// https://github.com/grpc/grpc-proto/blob/master/grpc/gcp/altscontext.proto
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.14.0
+// source: grpc/gcp/altscontext.proto
+
+package grpc_gcp
+
+import (
+	proto "github.com/golang/protobuf/proto"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type AltsContext struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The application protocol negotiated for this connection.
+ ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` + // The record protocol negotiated for this connection. + RecordProtocol string `protobuf:"bytes,2,opt,name=record_protocol,json=recordProtocol,proto3" json:"record_protocol,omitempty"` + // The security level of the created secure channel. + SecurityLevel SecurityLevel `protobuf:"varint,3,opt,name=security_level,json=securityLevel,proto3,enum=grpc.gcp.SecurityLevel" json:"security_level,omitempty"` + // The peer service account. + PeerServiceAccount string `protobuf:"bytes,4,opt,name=peer_service_account,json=peerServiceAccount,proto3" json:"peer_service_account,omitempty"` + // The local service account. + LocalServiceAccount string `protobuf:"bytes,5,opt,name=local_service_account,json=localServiceAccount,proto3" json:"local_service_account,omitempty"` + // The RPC protocol versions supported by the peer. + PeerRpcVersions *RpcProtocolVersions `protobuf:"bytes,6,opt,name=peer_rpc_versions,json=peerRpcVersions,proto3" json:"peer_rpc_versions,omitempty"` + // Additional attributes of the peer. + PeerAttributes map[string]string `protobuf:"bytes,7,rep,name=peer_attributes,json=peerAttributes,proto3" json:"peer_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *AltsContext) Reset() { + *x = AltsContext{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_altscontext_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AltsContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AltsContext) ProtoMessage() {} + +func (x *AltsContext) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_altscontext_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AltsContext.ProtoReflect.Descriptor instead. 
+func (*AltsContext) Descriptor() ([]byte, []int) { + return file_grpc_gcp_altscontext_proto_rawDescGZIP(), []int{0} +} + +func (x *AltsContext) GetApplicationProtocol() string { + if x != nil { + return x.ApplicationProtocol + } + return "" +} + +func (x *AltsContext) GetRecordProtocol() string { + if x != nil { + return x.RecordProtocol + } + return "" +} + +func (x *AltsContext) GetSecurityLevel() SecurityLevel { + if x != nil { + return x.SecurityLevel + } + return SecurityLevel_SECURITY_NONE +} + +func (x *AltsContext) GetPeerServiceAccount() string { + if x != nil { + return x.PeerServiceAccount + } + return "" +} + +func (x *AltsContext) GetLocalServiceAccount() string { + if x != nil { + return x.LocalServiceAccount + } + return "" +} + +func (x *AltsContext) GetPeerRpcVersions() *RpcProtocolVersions { + if x != nil { + return x.PeerRpcVersions + } + return nil +} + +func (x *AltsContext) GetPeerAttributes() map[string]string { + if x != nil { + return x.PeerAttributes + } + return nil +} + +var File_grpc_gcp_altscontext_proto protoreflect.FileDescriptor + +var file_grpc_gcp_altscontext_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x1a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, + 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xf1, 0x03, 0x0a, 0x0b, 0x41, 0x6c, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, + 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x3e, 0x0a, 0x0e, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, + 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0d, 0x73, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x30, 0x0a, 0x14, + 0x70, 0x65, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x70, 0x65, 0x65, 0x72, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x32, + 0x0a, 0x15, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 
0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65, + 0x65, 0x72, 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x52, 0x0a, + 0x0f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, + 0x70, 0x2e, 0x41, 0x6c, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x50, 0x65, + 0x65, 0x72, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0e, 0x70, 0x65, 0x65, 0x72, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x1a, 0x41, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x42, 0x6c, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x10, 0x41, + 0x6c, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, + 0x63, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_gcp_altscontext_proto_rawDescOnce sync.Once + file_grpc_gcp_altscontext_proto_rawDescData = file_grpc_gcp_altscontext_proto_rawDesc +) + +func file_grpc_gcp_altscontext_proto_rawDescGZIP() []byte { + file_grpc_gcp_altscontext_proto_rawDescOnce.Do(func() { + file_grpc_gcp_altscontext_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_gcp_altscontext_proto_rawDescData) + }) + return file_grpc_gcp_altscontext_proto_rawDescData +} + +var file_grpc_gcp_altscontext_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_grpc_gcp_altscontext_proto_goTypes = []interface{}{ + (*AltsContext)(nil), // 0: grpc.gcp.AltsContext + nil, // 1: grpc.gcp.AltsContext.PeerAttributesEntry + (SecurityLevel)(0), // 2: grpc.gcp.SecurityLevel + (*RpcProtocolVersions)(nil), // 3: grpc.gcp.RpcProtocolVersions +} +var file_grpc_gcp_altscontext_proto_depIdxs = []int32{ + 2, // 0: grpc.gcp.AltsContext.security_level:type_name -> grpc.gcp.SecurityLevel + 3, // 1: grpc.gcp.AltsContext.peer_rpc_versions:type_name -> grpc.gcp.RpcProtocolVersions + 1, // 2: grpc.gcp.AltsContext.peer_attributes:type_name -> grpc.gcp.AltsContext.PeerAttributesEntry + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_grpc_gcp_altscontext_proto_init() } +func file_grpc_gcp_altscontext_proto_init() { + if File_grpc_gcp_altscontext_proto != nil { + return + } + file_grpc_gcp_transport_security_common_proto_init() + if !protoimpl.UnsafeEnabled { + 
file_grpc_gcp_altscontext_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AltsContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_gcp_altscontext_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_grpc_gcp_altscontext_proto_goTypes, + DependencyIndexes: file_grpc_gcp_altscontext_proto_depIdxs, + MessageInfos: file_grpc_gcp_altscontext_proto_msgTypes, + }.Build() + File_grpc_gcp_altscontext_proto = out.File + file_grpc_gcp_altscontext_proto_rawDesc = nil + file_grpc_gcp_altscontext_proto_goTypes = nil + file_grpc_gcp_altscontext_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go new file mode 100644 index 00000000000..383c5fb97a7 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -0,0 +1,1428 @@ +// Copyright 2018 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/gcp/handshaker.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.14.0 +// source: grpc/gcp/handshaker.proto + +package grpc_gcp + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +type HandshakeProtocol int32 + +const ( + // Default value. + HandshakeProtocol_HANDSHAKE_PROTOCOL_UNSPECIFIED HandshakeProtocol = 0 + // TLS handshake protocol. + HandshakeProtocol_TLS HandshakeProtocol = 1 + // Application Layer Transport Security handshake protocol. + HandshakeProtocol_ALTS HandshakeProtocol = 2 +) + +// Enum value maps for HandshakeProtocol. 
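+// For example, HandshakeProtocol_name[2] == "ALTS" and
+// HandshakeProtocol_value["TLS"] == 1.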
+var ( + HandshakeProtocol_name = map[int32]string{ + 0: "HANDSHAKE_PROTOCOL_UNSPECIFIED", + 1: "TLS", + 2: "ALTS", + } + HandshakeProtocol_value = map[string]int32{ + "HANDSHAKE_PROTOCOL_UNSPECIFIED": 0, + "TLS": 1, + "ALTS": 2, + } +) + +func (x HandshakeProtocol) Enum() *HandshakeProtocol { + p := new(HandshakeProtocol) + *p = x + return p +} + +func (x HandshakeProtocol) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HandshakeProtocol) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_gcp_handshaker_proto_enumTypes[0].Descriptor() +} + +func (HandshakeProtocol) Type() protoreflect.EnumType { + return &file_grpc_gcp_handshaker_proto_enumTypes[0] +} + +func (x HandshakeProtocol) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HandshakeProtocol.Descriptor instead. +func (HandshakeProtocol) EnumDescriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{0} +} + +type NetworkProtocol int32 + +const ( + NetworkProtocol_NETWORK_PROTOCOL_UNSPECIFIED NetworkProtocol = 0 + NetworkProtocol_TCP NetworkProtocol = 1 + NetworkProtocol_UDP NetworkProtocol = 2 +) + +// Enum value maps for NetworkProtocol. +var ( + NetworkProtocol_name = map[int32]string{ + 0: "NETWORK_PROTOCOL_UNSPECIFIED", + 1: "TCP", + 2: "UDP", + } + NetworkProtocol_value = map[string]int32{ + "NETWORK_PROTOCOL_UNSPECIFIED": 0, + "TCP": 1, + "UDP": 2, + } +) + +func (x NetworkProtocol) Enum() *NetworkProtocol { + p := new(NetworkProtocol) + *p = x + return p +} + +func (x NetworkProtocol) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (NetworkProtocol) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_gcp_handshaker_proto_enumTypes[1].Descriptor() +} + +func (NetworkProtocol) Type() protoreflect.EnumType { + return &file_grpc_gcp_handshaker_proto_enumTypes[1] +} + +func (x NetworkProtocol) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use NetworkProtocol.Descriptor instead. +func (NetworkProtocol) EnumDescriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{1} +} + +type Endpoint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // IP address. It should contain an IPv4 or IPv6 string literal, e.g. + // "192.168.0.1" or "2001:db8::1". + IpAddress string `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + // Port number. + Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` + // Network protocol (e.g., TCP, UDP) associated with this endpoint. 
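+	// (A plain TCP connection, for instance, would carry
+	// NetworkProtocol_TCP here.)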
+ Protocol NetworkProtocol `protobuf:"varint,3,opt,name=protocol,proto3,enum=grpc.gcp.NetworkProtocol" json:"protocol,omitempty"` +} + +func (x *Endpoint) Reset() { + *x = Endpoint{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Endpoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Endpoint) ProtoMessage() {} + +func (x *Endpoint) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Endpoint.ProtoReflect.Descriptor instead. +func (*Endpoint) Descriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{0} +} + +func (x *Endpoint) GetIpAddress() string { + if x != nil { + return x.IpAddress + } + return "" +} + +func (x *Endpoint) GetPort() int32 { + if x != nil { + return x.Port + } + return 0 +} + +func (x *Endpoint) GetProtocol() NetworkProtocol { + if x != nil { + return x.Protocol + } + return NetworkProtocol_NETWORK_PROTOCOL_UNSPECIFIED +} + +type Identity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to IdentityOneof: + // + // *Identity_ServiceAccount + // *Identity_Hostname + IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` + // Additional attributes of the identity. + Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Identity) Reset() { + *x = Identity{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Identity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Identity) ProtoMessage() {} + +func (x *Identity) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Identity.ProtoReflect.Descriptor instead. +func (*Identity) Descriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{1} +} + +func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof { + if m != nil { + return m.IdentityOneof + } + return nil +} + +func (x *Identity) GetServiceAccount() string { + if x, ok := x.GetIdentityOneof().(*Identity_ServiceAccount); ok { + return x.ServiceAccount + } + return "" +} + +func (x *Identity) GetHostname() string { + if x, ok := x.GetIdentityOneof().(*Identity_Hostname); ok { + return x.Hostname + } + return "" +} + +func (x *Identity) GetAttributes() map[string]string { + if x != nil { + return x.Attributes + } + return nil +} + +type isIdentity_IdentityOneof interface { + isIdentity_IdentityOneof() +} + +type Identity_ServiceAccount struct { + // Service account of a connection endpoint. 
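+	// (Hypothetical example: "my-service@my-project.iam.gserviceaccount.com".)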
+ ServiceAccount string `protobuf:"bytes,1,opt,name=service_account,json=serviceAccount,proto3,oneof"` +} + +type Identity_Hostname struct { + // Hostname of a connection endpoint. + Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"` +} + +func (*Identity_ServiceAccount) isIdentity_IdentityOneof() {} + +func (*Identity_Hostname) isIdentity_IdentityOneof() {} + +type StartClientHandshakeReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Handshake security protocol requested by the client. + HandshakeSecurityProtocol HandshakeProtocol `protobuf:"varint,1,opt,name=handshake_security_protocol,json=handshakeSecurityProtocol,proto3,enum=grpc.gcp.HandshakeProtocol" json:"handshake_security_protocol,omitempty"` + // The application protocols supported by the client, e.g., "h2" (for http2), + // "grpc". + ApplicationProtocols []string `protobuf:"bytes,2,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` + // The record protocols supported by the client, e.g., + // "ALTSRP_GCM_AES128". + RecordProtocols []string `protobuf:"bytes,3,rep,name=record_protocols,json=recordProtocols,proto3" json:"record_protocols,omitempty"` + // (Optional) Describes which server identities are acceptable by the client. + // If target identities are provided and none of them matches the peer + // identity of the server, handshake will fail. + TargetIdentities []*Identity `protobuf:"bytes,4,rep,name=target_identities,json=targetIdentities,proto3" json:"target_identities,omitempty"` + // (Optional) Application may specify a local identity. Otherwise, the + // handshaker chooses a default local identity. + LocalIdentity *Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // (Optional) Local endpoint information of the connection to the server, + // such as local IP address, port number, and network protocol. + LocalEndpoint *Endpoint `protobuf:"bytes,6,opt,name=local_endpoint,json=localEndpoint,proto3" json:"local_endpoint,omitempty"` + // (Optional) Endpoint information of the remote server, such as IP address, + // port number, and network protocol. + RemoteEndpoint *Endpoint `protobuf:"bytes,7,opt,name=remote_endpoint,json=remoteEndpoint,proto3" json:"remote_endpoint,omitempty"` + // (Optional) If target name is provided, a secure naming check is performed + // to verify that the peer authenticated identity is indeed authorized to run + // the target name. + TargetName string `protobuf:"bytes,8,opt,name=target_name,json=targetName,proto3" json:"target_name,omitempty"` + // (Optional) RPC protocol versions supported by the client. + RpcVersions *RpcProtocolVersions `protobuf:"bytes,9,opt,name=rpc_versions,json=rpcVersions,proto3" json:"rpc_versions,omitempty"` + // (Optional) Maximum frame size supported by the client. 
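+	// (This Go implementation reads handshake frames into a buffer of
+	// frameLimit bytes; see processUntilDone in handshaker.go.)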
+ MaxFrameSize uint32 `protobuf:"varint,10,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"` +} + +func (x *StartClientHandshakeReq) Reset() { + *x = StartClientHandshakeReq{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartClientHandshakeReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartClientHandshakeReq) ProtoMessage() {} + +func (x *StartClientHandshakeReq) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartClientHandshakeReq.ProtoReflect.Descriptor instead. +func (*StartClientHandshakeReq) Descriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{2} +} + +func (x *StartClientHandshakeReq) GetHandshakeSecurityProtocol() HandshakeProtocol { + if x != nil { + return x.HandshakeSecurityProtocol + } + return HandshakeProtocol_HANDSHAKE_PROTOCOL_UNSPECIFIED +} + +func (x *StartClientHandshakeReq) GetApplicationProtocols() []string { + if x != nil { + return x.ApplicationProtocols + } + return nil +} + +func (x *StartClientHandshakeReq) GetRecordProtocols() []string { + if x != nil { + return x.RecordProtocols + } + return nil +} + +func (x *StartClientHandshakeReq) GetTargetIdentities() []*Identity { + if x != nil { + return x.TargetIdentities + } + return nil +} + +func (x *StartClientHandshakeReq) GetLocalIdentity() *Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *StartClientHandshakeReq) GetLocalEndpoint() *Endpoint { + if x != nil { + return x.LocalEndpoint + } + return nil +} + +func (x *StartClientHandshakeReq) GetRemoteEndpoint() *Endpoint { + if x != nil { + return x.RemoteEndpoint + } + return nil +} + +func (x *StartClientHandshakeReq) GetTargetName() string { + if x != nil { + return x.TargetName + } + return "" +} + +func (x *StartClientHandshakeReq) GetRpcVersions() *RpcProtocolVersions { + if x != nil { + return x.RpcVersions + } + return nil +} + +func (x *StartClientHandshakeReq) GetMaxFrameSize() uint32 { + if x != nil { + return x.MaxFrameSize + } + return 0 +} + +type ServerHandshakeParameters struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The record protocols supported by the server, e.g., + // "ALTSRP_GCM_AES128". + RecordProtocols []string `protobuf:"bytes,1,rep,name=record_protocols,json=recordProtocols,proto3" json:"record_protocols,omitempty"` + // (Optional) A list of local identities supported by the server, if + // specified. Otherwise, the handshaker chooses a default local identity. 
+ LocalIdentities []*Identity `protobuf:"bytes,2,rep,name=local_identities,json=localIdentities,proto3" json:"local_identities,omitempty"` +} + +func (x *ServerHandshakeParameters) Reset() { + *x = ServerHandshakeParameters{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerHandshakeParameters) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerHandshakeParameters) ProtoMessage() {} + +func (x *ServerHandshakeParameters) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerHandshakeParameters.ProtoReflect.Descriptor instead. +func (*ServerHandshakeParameters) Descriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{3} +} + +func (x *ServerHandshakeParameters) GetRecordProtocols() []string { + if x != nil { + return x.RecordProtocols + } + return nil +} + +func (x *ServerHandshakeParameters) GetLocalIdentities() []*Identity { + if x != nil { + return x.LocalIdentities + } + return nil +} + +type StartServerHandshakeReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The application protocols supported by the server, e.g., "h2" (for http2), + // "grpc". + ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` + // Handshake parameters (record protocols and local identities supported by + // the server) mapped by the handshake protocol. Each handshake security + // protocol (e.g., TLS or ALTS) has its own set of record protocols and local + // identities. Since protobuf does not support enum as key to the map, the key + // to handshake_parameters is the integer value of HandshakeProtocol enum. + HandshakeParameters map[int32]*ServerHandshakeParameters `protobuf:"bytes,2,rep,name=handshake_parameters,json=handshakeParameters,proto3" json:"handshake_parameters,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Bytes in out_frames returned from the peer's HandshakerResp. It is possible + // that the peer's out_frames are split into multiple HandshakReq messages. + InBytes []byte `protobuf:"bytes,3,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` + // (Optional) Local endpoint information of the connection to the client, + // such as local IP address, port number, and network protocol. + LocalEndpoint *Endpoint `protobuf:"bytes,4,opt,name=local_endpoint,json=localEndpoint,proto3" json:"local_endpoint,omitempty"` + // (Optional) Endpoint information of the remote client, such as IP address, + // port number, and network protocol. + RemoteEndpoint *Endpoint `protobuf:"bytes,5,opt,name=remote_endpoint,json=remoteEndpoint,proto3" json:"remote_endpoint,omitempty"` + // (Optional) RPC protocol versions supported by the server. + RpcVersions *RpcProtocolVersions `protobuf:"bytes,6,opt,name=rpc_versions,json=rpcVersions,proto3" json:"rpc_versions,omitempty"` + // (Optional) Maximum frame size supported by the server. 
+ MaxFrameSize uint32 `protobuf:"varint,7,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"` +} + +func (x *StartServerHandshakeReq) Reset() { + *x = StartServerHandshakeReq{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartServerHandshakeReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartServerHandshakeReq) ProtoMessage() {} + +func (x *StartServerHandshakeReq) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartServerHandshakeReq.ProtoReflect.Descriptor instead. +func (*StartServerHandshakeReq) Descriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{4} +} + +func (x *StartServerHandshakeReq) GetApplicationProtocols() []string { + if x != nil { + return x.ApplicationProtocols + } + return nil +} + +func (x *StartServerHandshakeReq) GetHandshakeParameters() map[int32]*ServerHandshakeParameters { + if x != nil { + return x.HandshakeParameters + } + return nil +} + +func (x *StartServerHandshakeReq) GetInBytes() []byte { + if x != nil { + return x.InBytes + } + return nil +} + +func (x *StartServerHandshakeReq) GetLocalEndpoint() *Endpoint { + if x != nil { + return x.LocalEndpoint + } + return nil +} + +func (x *StartServerHandshakeReq) GetRemoteEndpoint() *Endpoint { + if x != nil { + return x.RemoteEndpoint + } + return nil +} + +func (x *StartServerHandshakeReq) GetRpcVersions() *RpcProtocolVersions { + if x != nil { + return x.RpcVersions + } + return nil +} + +func (x *StartServerHandshakeReq) GetMaxFrameSize() uint32 { + if x != nil { + return x.MaxFrameSize + } + return 0 +} + +type NextHandshakeMessageReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Bytes in out_frames returned from the peer's HandshakerResp. It is possible + // that the peer's out_frames are split into multiple NextHandshakerMessageReq + // messages. + InBytes []byte `protobuf:"bytes,1,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` +} + +func (x *NextHandshakeMessageReq) Reset() { + *x = NextHandshakeMessageReq{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NextHandshakeMessageReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NextHandshakeMessageReq) ProtoMessage() {} + +func (x *NextHandshakeMessageReq) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NextHandshakeMessageReq.ProtoReflect.Descriptor instead. 
+func (*NextHandshakeMessageReq) Descriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{5} +} + +func (x *NextHandshakeMessageReq) GetInBytes() []byte { + if x != nil { + return x.InBytes + } + return nil +} + +type HandshakerReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ReqOneof: + // + // *HandshakerReq_ClientStart + // *HandshakerReq_ServerStart + // *HandshakerReq_Next + ReqOneof isHandshakerReq_ReqOneof `protobuf_oneof:"req_oneof"` +} + +func (x *HandshakerReq) Reset() { + *x = HandshakerReq{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HandshakerReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HandshakerReq) ProtoMessage() {} + +func (x *HandshakerReq) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HandshakerReq.ProtoReflect.Descriptor instead. +func (*HandshakerReq) Descriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{6} +} + +func (m *HandshakerReq) GetReqOneof() isHandshakerReq_ReqOneof { + if m != nil { + return m.ReqOneof + } + return nil +} + +func (x *HandshakerReq) GetClientStart() *StartClientHandshakeReq { + if x, ok := x.GetReqOneof().(*HandshakerReq_ClientStart); ok { + return x.ClientStart + } + return nil +} + +func (x *HandshakerReq) GetServerStart() *StartServerHandshakeReq { + if x, ok := x.GetReqOneof().(*HandshakerReq_ServerStart); ok { + return x.ServerStart + } + return nil +} + +func (x *HandshakerReq) GetNext() *NextHandshakeMessageReq { + if x, ok := x.GetReqOneof().(*HandshakerReq_Next); ok { + return x.Next + } + return nil +} + +type isHandshakerReq_ReqOneof interface { + isHandshakerReq_ReqOneof() +} + +type HandshakerReq_ClientStart struct { + // The start client handshake request message. + ClientStart *StartClientHandshakeReq `protobuf:"bytes,1,opt,name=client_start,json=clientStart,proto3,oneof"` +} + +type HandshakerReq_ServerStart struct { + // The start server handshake request message. + ServerStart *StartServerHandshakeReq `protobuf:"bytes,2,opt,name=server_start,json=serverStart,proto3,oneof"` +} + +type HandshakerReq_Next struct { + // The next handshake request message. + Next *NextHandshakeMessageReq `protobuf:"bytes,3,opt,name=next,proto3,oneof"` +} + +func (*HandshakerReq_ClientStart) isHandshakerReq_ReqOneof() {} + +func (*HandshakerReq_ServerStart) isHandshakerReq_ReqOneof() {} + +func (*HandshakerReq_Next) isHandshakerReq_ReqOneof() {} + +type HandshakerResult struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The application protocol negotiated for this connection. + ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` + // The record protocol negotiated for this connection. + RecordProtocol string `protobuf:"bytes,2,opt,name=record_protocol,json=recordProtocol,proto3" json:"record_protocol,omitempty"` + // Cryptographic key data. 
The key data may be more than the key length + // required for the record protocol, thus the client of the handshaker + // service needs to truncate the key data into the right key length. + KeyData []byte `protobuf:"bytes,3,opt,name=key_data,json=keyData,proto3" json:"key_data,omitempty"` + // The authenticated identity of the peer. + PeerIdentity *Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"` + // The local identity used in the handshake. + LocalIdentity *Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // Indicate whether the handshaker service client should keep the channel + // between the handshaker service open, e.g., in order to handle + // post-handshake messages in the future. + KeepChannelOpen bool `protobuf:"varint,6,opt,name=keep_channel_open,json=keepChannelOpen,proto3" json:"keep_channel_open,omitempty"` + // The RPC protocol versions supported by the peer. + PeerRpcVersions *RpcProtocolVersions `protobuf:"bytes,7,opt,name=peer_rpc_versions,json=peerRpcVersions,proto3" json:"peer_rpc_versions,omitempty"` + // The maximum frame size of the peer. + MaxFrameSize uint32 `protobuf:"varint,8,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"` +} + +func (x *HandshakerResult) Reset() { + *x = HandshakerResult{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HandshakerResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HandshakerResult) ProtoMessage() {} + +func (x *HandshakerResult) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HandshakerResult.ProtoReflect.Descriptor instead. +func (*HandshakerResult) Descriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{7} +} + +func (x *HandshakerResult) GetApplicationProtocol() string { + if x != nil { + return x.ApplicationProtocol + } + return "" +} + +func (x *HandshakerResult) GetRecordProtocol() string { + if x != nil { + return x.RecordProtocol + } + return "" +} + +func (x *HandshakerResult) GetKeyData() []byte { + if x != nil { + return x.KeyData + } + return nil +} + +func (x *HandshakerResult) GetPeerIdentity() *Identity { + if x != nil { + return x.PeerIdentity + } + return nil +} + +func (x *HandshakerResult) GetLocalIdentity() *Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *HandshakerResult) GetKeepChannelOpen() bool { + if x != nil { + return x.KeepChannelOpen + } + return false +} + +func (x *HandshakerResult) GetPeerRpcVersions() *RpcProtocolVersions { + if x != nil { + return x.PeerRpcVersions + } + return nil +} + +func (x *HandshakerResult) GetMaxFrameSize() uint32 { + if x != nil { + return x.MaxFrameSize + } + return 0 +} + +type HandshakerStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status code. This could be the gRPC status code. + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // The status details. 
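+	// (Typically a human-readable message explaining why a handshake failed.)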
+ Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` +} + +func (x *HandshakerStatus) Reset() { + *x = HandshakerStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HandshakerStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HandshakerStatus) ProtoMessage() {} + +func (x *HandshakerStatus) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HandshakerStatus.ProtoReflect.Descriptor instead. +func (*HandshakerStatus) Descriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{8} +} + +func (x *HandshakerStatus) GetCode() uint32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *HandshakerStatus) GetDetails() string { + if x != nil { + return x.Details + } + return "" +} + +type HandshakerResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Frames to be given to the peer for the NextHandshakeMessageReq. May be + // empty if no out_frames have to be sent to the peer or if in_bytes in the + // HandshakerReq are incomplete. All the non-empty out frames must be sent to + // the peer even if the handshaker status is not OK as these frames may + // contain the alert frames. + OutFrames []byte `protobuf:"bytes,1,opt,name=out_frames,json=outFrames,proto3" json:"out_frames,omitempty"` + // Number of bytes in the in_bytes consumed by the handshaker. It is possible + // that part of in_bytes in HandshakerReq was unrelated to the handshake + // process. + BytesConsumed uint32 `protobuf:"varint,2,opt,name=bytes_consumed,json=bytesConsumed,proto3" json:"bytes_consumed,omitempty"` + // This is set iff the handshake was successful. out_frames may still be set + // to frames that needs to be forwarded to the peer. + Result *HandshakerResult `protobuf:"bytes,3,opt,name=result,proto3" json:"result,omitempty"` + // Status of the handshaker. + Status *HandshakerStatus `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"` +} + +func (x *HandshakerResp) Reset() { + *x = HandshakerResp{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HandshakerResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HandshakerResp) ProtoMessage() {} + +func (x *HandshakerResp) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HandshakerResp.ProtoReflect.Descriptor instead. 
+func (*HandshakerResp) Descriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{9} +} + +func (x *HandshakerResp) GetOutFrames() []byte { + if x != nil { + return x.OutFrames + } + return nil +} + +func (x *HandshakerResp) GetBytesConsumed() uint32 { + if x != nil { + return x.BytesConsumed + } + return 0 +} + +func (x *HandshakerResp) GetResult() *HandshakerResult { + if x != nil { + return x.Result + } + return nil +} + +func (x *HandshakerResp) GetStatus() *HandshakerStatus { + if x != nil { + return x.Status + } + return nil +} + +var File_grpc_gcp_handshaker_proto protoreflect.FileDescriptor + +var file_grpc_gcp_handshaker_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f, 0x68, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x63, 0x70, 0x1a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0x74, 0x0a, 0x08, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x69, + 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x35, + 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0xe8, 0x01, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x12, 0x29, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1c, 0x0a, + 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x0a, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x1a, + 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, + 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, + 0x22, 0xd3, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x5b, 0x0a, 0x1b, + 0x68, 0x61, 
0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, + 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x19, + 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x33, 0x0a, 0x15, 0x61, 0x70, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x29, + 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x3f, 0x0a, 0x11, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, + 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x65, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x12, 0x3b, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1f, 0x0a, + 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x40, + 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, + 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, + 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x85, 0x01, 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 
0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, + 0x3d, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0f, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x22, 0xa5, + 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, + 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15, 0x61, 0x70, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, + 0x6d, 0x0a, 0x14, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, + 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x19, + 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, + 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48, 0x61, 0x6e, + 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 
0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, + 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x34, 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, + 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, + 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe5, 0x01, 0x0a, + 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, + 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, + 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, + 0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, + 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, + 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x44, 0x61, 0x74, 0x61, + 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, + 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 
0x6f, 0x63, + 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, + 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, + 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, + 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, + 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, + 0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, + 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, + 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, + 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, + 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e, + 0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, + 0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, + 0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, + 0x52, 0x4f, 
0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, + 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, + 0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, + 0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, + 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, + 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_gcp_handshaker_proto_rawDescOnce sync.Once + file_grpc_gcp_handshaker_proto_rawDescData = file_grpc_gcp_handshaker_proto_rawDesc +) + +func file_grpc_gcp_handshaker_proto_rawDescGZIP() []byte { + file_grpc_gcp_handshaker_proto_rawDescOnce.Do(func() { + file_grpc_gcp_handshaker_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_gcp_handshaker_proto_rawDescData) + }) + return file_grpc_gcp_handshaker_proto_rawDescData +} + +var file_grpc_gcp_handshaker_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_grpc_gcp_handshaker_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_grpc_gcp_handshaker_proto_goTypes = []interface{}{ + (HandshakeProtocol)(0), // 0: grpc.gcp.HandshakeProtocol + (NetworkProtocol)(0), // 1: grpc.gcp.NetworkProtocol + (*Endpoint)(nil), // 2: grpc.gcp.Endpoint + (*Identity)(nil), // 3: grpc.gcp.Identity + (*StartClientHandshakeReq)(nil), // 4: grpc.gcp.StartClientHandshakeReq + (*ServerHandshakeParameters)(nil), // 5: grpc.gcp.ServerHandshakeParameters + (*StartServerHandshakeReq)(nil), // 6: grpc.gcp.StartServerHandshakeReq + (*NextHandshakeMessageReq)(nil), // 7: grpc.gcp.NextHandshakeMessageReq + (*HandshakerReq)(nil), // 8: grpc.gcp.HandshakerReq + (*HandshakerResult)(nil), // 9: grpc.gcp.HandshakerResult + (*HandshakerStatus)(nil), // 10: grpc.gcp.HandshakerStatus + (*HandshakerResp)(nil), // 11: grpc.gcp.HandshakerResp + nil, // 12: grpc.gcp.Identity.AttributesEntry + nil, // 13: grpc.gcp.StartServerHandshakeReq.HandshakeParametersEntry + (*RpcProtocolVersions)(nil), // 14: grpc.gcp.RpcProtocolVersions +} +var file_grpc_gcp_handshaker_proto_depIdxs = []int32{ + 1, // 0: grpc.gcp.Endpoint.protocol:type_name -> grpc.gcp.NetworkProtocol + 12, // 1: grpc.gcp.Identity.attributes:type_name -> grpc.gcp.Identity.AttributesEntry + 0, // 2: grpc.gcp.StartClientHandshakeReq.handshake_security_protocol:type_name -> grpc.gcp.HandshakeProtocol + 3, // 3: grpc.gcp.StartClientHandshakeReq.target_identities:type_name -> grpc.gcp.Identity + 3, // 4: 
grpc.gcp.StartClientHandshakeReq.local_identity:type_name -> grpc.gcp.Identity + 2, // 5: grpc.gcp.StartClientHandshakeReq.local_endpoint:type_name -> grpc.gcp.Endpoint + 2, // 6: grpc.gcp.StartClientHandshakeReq.remote_endpoint:type_name -> grpc.gcp.Endpoint + 14, // 7: grpc.gcp.StartClientHandshakeReq.rpc_versions:type_name -> grpc.gcp.RpcProtocolVersions + 3, // 8: grpc.gcp.ServerHandshakeParameters.local_identities:type_name -> grpc.gcp.Identity + 13, // 9: grpc.gcp.StartServerHandshakeReq.handshake_parameters:type_name -> grpc.gcp.StartServerHandshakeReq.HandshakeParametersEntry + 2, // 10: grpc.gcp.StartServerHandshakeReq.local_endpoint:type_name -> grpc.gcp.Endpoint + 2, // 11: grpc.gcp.StartServerHandshakeReq.remote_endpoint:type_name -> grpc.gcp.Endpoint + 14, // 12: grpc.gcp.StartServerHandshakeReq.rpc_versions:type_name -> grpc.gcp.RpcProtocolVersions + 4, // 13: grpc.gcp.HandshakerReq.client_start:type_name -> grpc.gcp.StartClientHandshakeReq + 6, // 14: grpc.gcp.HandshakerReq.server_start:type_name -> grpc.gcp.StartServerHandshakeReq + 7, // 15: grpc.gcp.HandshakerReq.next:type_name -> grpc.gcp.NextHandshakeMessageReq + 3, // 16: grpc.gcp.HandshakerResult.peer_identity:type_name -> grpc.gcp.Identity + 3, // 17: grpc.gcp.HandshakerResult.local_identity:type_name -> grpc.gcp.Identity + 14, // 18: grpc.gcp.HandshakerResult.peer_rpc_versions:type_name -> grpc.gcp.RpcProtocolVersions + 9, // 19: grpc.gcp.HandshakerResp.result:type_name -> grpc.gcp.HandshakerResult + 10, // 20: grpc.gcp.HandshakerResp.status:type_name -> grpc.gcp.HandshakerStatus + 5, // 21: grpc.gcp.StartServerHandshakeReq.HandshakeParametersEntry.value:type_name -> grpc.gcp.ServerHandshakeParameters + 8, // 22: grpc.gcp.HandshakerService.DoHandshake:input_type -> grpc.gcp.HandshakerReq + 11, // 23: grpc.gcp.HandshakerService.DoHandshake:output_type -> grpc.gcp.HandshakerResp + 23, // [23:24] is the sub-list for method output_type + 22, // [22:23] is the sub-list for method input_type + 22, // [22:22] is the sub-list for extension type_name + 22, // [22:22] is the sub-list for extension extendee + 0, // [0:22] is the sub-list for field type_name +} + +func init() { file_grpc_gcp_handshaker_proto_init() } +func file_grpc_gcp_handshaker_proto_init() { + if File_grpc_gcp_handshaker_proto != nil { + return + } + file_grpc_gcp_transport_security_common_proto_init() + if !protoimpl.UnsafeEnabled { + file_grpc_gcp_handshaker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Endpoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Identity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StartClientHandshakeReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerHandshakeParameters); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[4].Exporter = func(v interface{}, i 
int) interface{} { + switch v := v.(*StartServerHandshakeReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NextHandshakeMessageReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HandshakerReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HandshakerResult); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HandshakerStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HandshakerResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_gcp_handshaker_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*Identity_ServiceAccount)(nil), + (*Identity_Hostname)(nil), + } + file_grpc_gcp_handshaker_proto_msgTypes[6].OneofWrappers = []interface{}{ + (*HandshakerReq_ClientStart)(nil), + (*HandshakerReq_ServerStart)(nil), + (*HandshakerReq_Next)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_gcp_handshaker_proto_rawDesc, + NumEnums: 2, + NumMessages: 12, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_gcp_handshaker_proto_goTypes, + DependencyIndexes: file_grpc_gcp_handshaker_proto_depIdxs, + EnumInfos: file_grpc_gcp_handshaker_proto_enumTypes, + MessageInfos: file_grpc_gcp_handshaker_proto_msgTypes, + }.Build() + File_grpc_gcp_handshaker_proto = out.File + file_grpc_gcp_handshaker_proto_rawDesc = nil + file_grpc_gcp_handshaker_proto_goTypes = nil + file_grpc_gcp_handshaker_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go new file mode 100644 index 00000000000..d3562c6d5e6 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -0,0 +1,166 @@ +// Copyright 2018 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
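// [Editor's note — illustrative sketch, not part of the upstream patch.]
// This file defines the client and server stubs for the bidirectional
// DoHandshake stream. Assuming an existing *grpc.ClientConn named conn and a
// populated StartClientHandshakeReq named start, a caller drives the
// handshake roughly like:
//
//	client := grpc_gcp.NewHandshakerServiceClient(conn)
//	stream, err := client.DoHandshake(ctx)
//	if err != nil {
//		return err
//	}
//	err = stream.Send(&grpc_gcp.HandshakerReq{
//		ReqOneof: &grpc_gcp.HandshakerReq_ClientStart{ClientStart: start},
//	})
//	if err != nil {
//		return err
//	}
//	resp, err := stream.Recv() // out_frames to write to the wire, or a result/status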
+ +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/gcp/handshaker.proto + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.14.0 +// source: grpc/gcp/handshaker.proto + +package grpc_gcp + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// HandshakerServiceClient is the client API for HandshakerService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type HandshakerServiceClient interface { + // Handshaker service accepts a stream of handshaker request, returning a + // stream of handshaker response. Client is expected to send exactly one + // message with either client_start or server_start followed by one or more + // messages with next. Each time client sends a request, the handshaker + // service expects to respond. Client does not have to wait for service's + // response before sending next request. + DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) +} + +type handshakerServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewHandshakerServiceClient(cc grpc.ClientConnInterface) HandshakerServiceClient { + return &handshakerServiceClient{cc} +} + +func (c *handshakerServiceClient) DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) { + stream, err := c.cc.NewStream(ctx, &HandshakerService_ServiceDesc.Streams[0], "/grpc.gcp.HandshakerService/DoHandshake", opts...) + if err != nil { + return nil, err + } + x := &handshakerServiceDoHandshakeClient{stream} + return x, nil +} + +type HandshakerService_DoHandshakeClient interface { + Send(*HandshakerReq) error + Recv() (*HandshakerResp, error) + grpc.ClientStream +} + +type handshakerServiceDoHandshakeClient struct { + grpc.ClientStream +} + +func (x *handshakerServiceDoHandshakeClient) Send(m *HandshakerReq) error { + return x.ClientStream.SendMsg(m) +} + +func (x *handshakerServiceDoHandshakeClient) Recv() (*HandshakerResp, error) { + m := new(HandshakerResp) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// HandshakerServiceServer is the server API for HandshakerService service. +// All implementations must embed UnimplementedHandshakerServiceServer +// for forward compatibility +type HandshakerServiceServer interface { + // Handshaker service accepts a stream of handshaker request, returning a + // stream of handshaker response. Client is expected to send exactly one + // message with either client_start or server_start followed by one or more + // messages with next. Each time client sends a request, the handshaker + // service expects to respond. Client does not have to wait for service's + // response before sending next request. + DoHandshake(HandshakerService_DoHandshakeServer) error + mustEmbedUnimplementedHandshakerServiceServer() +} + +// UnimplementedHandshakerServiceServer must be embedded to have forward compatible implementations. 
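// [Editor's note — illustrative sketch, not part of the upstream patch.]
// A concrete server implementation embeds this struct so that methods added
// to the service later return codes.Unimplemented instead of breaking the
// build, e.g.:
//
//	type myHandshaker struct {
//		grpc_gcp.UnimplementedHandshakerServiceServer
//	}
//
//	func (h *myHandshaker) DoHandshake(stream grpc_gcp.HandshakerService_DoHandshakeServer) error {
//		// Recv HandshakerReq frames and Send HandshakerResp frames here.
//		return nil
//	}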
+type UnimplementedHandshakerServiceServer struct { +} + +func (UnimplementedHandshakerServiceServer) DoHandshake(HandshakerService_DoHandshakeServer) error { + return status.Errorf(codes.Unimplemented, "method DoHandshake not implemented") +} +func (UnimplementedHandshakerServiceServer) mustEmbedUnimplementedHandshakerServiceServer() {} + +// UnsafeHandshakerServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to HandshakerServiceServer will +// result in compilation errors. +type UnsafeHandshakerServiceServer interface { + mustEmbedUnimplementedHandshakerServiceServer() +} + +func RegisterHandshakerServiceServer(s grpc.ServiceRegistrar, srv HandshakerServiceServer) { + s.RegisterService(&HandshakerService_ServiceDesc, srv) +} + +func _HandshakerService_DoHandshake_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(HandshakerServiceServer).DoHandshake(&handshakerServiceDoHandshakeServer{stream}) +} + +type HandshakerService_DoHandshakeServer interface { + Send(*HandshakerResp) error + Recv() (*HandshakerReq, error) + grpc.ServerStream +} + +type handshakerServiceDoHandshakeServer struct { + grpc.ServerStream +} + +func (x *handshakerServiceDoHandshakeServer) Send(m *HandshakerResp) error { + return x.ServerStream.SendMsg(m) +} + +func (x *handshakerServiceDoHandshakeServer) Recv() (*HandshakerReq, error) { + m := new(HandshakerReq) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// HandshakerService_ServiceDesc is the grpc.ServiceDesc for HandshakerService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var HandshakerService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.gcp.HandshakerService", + HandlerType: (*HandshakerServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "DoHandshake", + Handler: _HandshakerService_DoHandshake_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/gcp/handshaker.proto", +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go new file mode 100644 index 00000000000..4fc3c79d6a3 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -0,0 +1,326 @@ +// Copyright 2018 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/gcp/transport_security_common.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.25.0 +// protoc v3.14.0 +// source: grpc/gcp/transport_security_common.proto + +package grpc_gcp + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// The security level of the created channel. The list is sorted in increasing +// level of security. This order must always be maintained. +type SecurityLevel int32 + +const ( + SecurityLevel_SECURITY_NONE SecurityLevel = 0 + SecurityLevel_INTEGRITY_ONLY SecurityLevel = 1 + SecurityLevel_INTEGRITY_AND_PRIVACY SecurityLevel = 2 +) + +// Enum value maps for SecurityLevel. +var ( + SecurityLevel_name = map[int32]string{ + 0: "SECURITY_NONE", + 1: "INTEGRITY_ONLY", + 2: "INTEGRITY_AND_PRIVACY", + } + SecurityLevel_value = map[string]int32{ + "SECURITY_NONE": 0, + "INTEGRITY_ONLY": 1, + "INTEGRITY_AND_PRIVACY": 2, + } +) + +func (x SecurityLevel) Enum() *SecurityLevel { + p := new(SecurityLevel) + *p = x + return p +} + +func (x SecurityLevel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SecurityLevel) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_gcp_transport_security_common_proto_enumTypes[0].Descriptor() +} + +func (SecurityLevel) Type() protoreflect.EnumType { + return &file_grpc_gcp_transport_security_common_proto_enumTypes[0] +} + +func (x SecurityLevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SecurityLevel.Descriptor instead. +func (SecurityLevel) EnumDescriptor() ([]byte, []int) { + return file_grpc_gcp_transport_security_common_proto_rawDescGZIP(), []int{0} +} + +// Max and min supported RPC protocol versions. +type RpcProtocolVersions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Maximum supported RPC version. + MaxRpcVersion *RpcProtocolVersions_Version `protobuf:"bytes,1,opt,name=max_rpc_version,json=maxRpcVersion,proto3" json:"max_rpc_version,omitempty"` + // Minimum supported RPC version. 
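	// [Editor's note, illustrative] Each ALTS peer advertises its own
	// [min, max] range; the handshake proceeds only if the two ranges
	// overlap, with the effective version being roughly the lower of the
	// two advertised maximums.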
+ MinRpcVersion *RpcProtocolVersions_Version `protobuf:"bytes,2,opt,name=min_rpc_version,json=minRpcVersion,proto3" json:"min_rpc_version,omitempty"` +} + +func (x *RpcProtocolVersions) Reset() { + *x = RpcProtocolVersions{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RpcProtocolVersions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RpcProtocolVersions) ProtoMessage() {} + +func (x *RpcProtocolVersions) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RpcProtocolVersions.ProtoReflect.Descriptor instead. +func (*RpcProtocolVersions) Descriptor() ([]byte, []int) { + return file_grpc_gcp_transport_security_common_proto_rawDescGZIP(), []int{0} +} + +func (x *RpcProtocolVersions) GetMaxRpcVersion() *RpcProtocolVersions_Version { + if x != nil { + return x.MaxRpcVersion + } + return nil +} + +func (x *RpcProtocolVersions) GetMinRpcVersion() *RpcProtocolVersions_Version { + if x != nil { + return x.MinRpcVersion + } + return nil +} + +// RPC version contains a major version and a minor version. +type RpcProtocolVersions_Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Major uint32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` + Minor uint32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` +} + +func (x *RpcProtocolVersions_Version) Reset() { + *x = RpcProtocolVersions_Version{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RpcProtocolVersions_Version) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RpcProtocolVersions_Version) ProtoMessage() {} + +func (x *RpcProtocolVersions_Version) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RpcProtocolVersions_Version.ProtoReflect.Descriptor instead. 
+func (*RpcProtocolVersions_Version) Descriptor() ([]byte, []int) { + return file_grpc_gcp_transport_security_common_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *RpcProtocolVersions_Version) GetMajor() uint32 { + if x != nil { + return x.Major + } + return 0 +} + +func (x *RpcProtocolVersions_Version) GetMinor() uint32 { + if x != nil { + return x.Minor + } + return 0 +} + +var File_grpc_gcp_transport_security_common_proto protoreflect.FileDescriptor + +var file_grpc_gcp_transport_security_common_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x63, 0x70, 0x22, 0xea, 0x01, 0x0a, 0x13, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4d, 0x0a, 0x0f, + 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, + 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, + 0x78, 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x0f, 0x6d, + 0x69, 0x6e, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, + 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, + 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, + 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, + 0x72, 0x2a, 0x51, 0x0a, 0x0d, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x45, 0x43, 0x55, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, + 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x52, 0x49, + 0x54, 0x59, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x49, 0x4e, 0x54, + 0x45, 0x47, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x50, 0x52, 0x49, 0x56, 0x41, + 0x43, 0x59, 0x10, 0x02, 0x42, 0x78, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x1c, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, + 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, 0x70, 
0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_gcp_transport_security_common_proto_rawDescOnce sync.Once + file_grpc_gcp_transport_security_common_proto_rawDescData = file_grpc_gcp_transport_security_common_proto_rawDesc +) + +func file_grpc_gcp_transport_security_common_proto_rawDescGZIP() []byte { + file_grpc_gcp_transport_security_common_proto_rawDescOnce.Do(func() { + file_grpc_gcp_transport_security_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_gcp_transport_security_common_proto_rawDescData) + }) + return file_grpc_gcp_transport_security_common_proto_rawDescData +} + +var file_grpc_gcp_transport_security_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_grpc_gcp_transport_security_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_grpc_gcp_transport_security_common_proto_goTypes = []interface{}{ + (SecurityLevel)(0), // 0: grpc.gcp.SecurityLevel + (*RpcProtocolVersions)(nil), // 1: grpc.gcp.RpcProtocolVersions + (*RpcProtocolVersions_Version)(nil), // 2: grpc.gcp.RpcProtocolVersions.Version +} +var file_grpc_gcp_transport_security_common_proto_depIdxs = []int32{ + 2, // 0: grpc.gcp.RpcProtocolVersions.max_rpc_version:type_name -> grpc.gcp.RpcProtocolVersions.Version + 2, // 1: grpc.gcp.RpcProtocolVersions.min_rpc_version:type_name -> grpc.gcp.RpcProtocolVersions.Version + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_grpc_gcp_transport_security_common_proto_init() } +func file_grpc_gcp_transport_security_common_proto_init() { + if File_grpc_gcp_transport_security_common_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_gcp_transport_security_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RpcProtocolVersions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_transport_security_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RpcProtocolVersions_Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_gcp_transport_security_common_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_grpc_gcp_transport_security_common_proto_goTypes, + DependencyIndexes: file_grpc_gcp_transport_security_common_proto_depIdxs, + EnumInfos: file_grpc_gcp_transport_security_common_proto_enumTypes, + MessageInfos: file_grpc_gcp_transport_security_common_proto_msgTypes, + }.Build() + File_grpc_gcp_transport_security_common_proto = out.File + file_grpc_gcp_transport_security_common_proto_rawDesc = nil + file_grpc_gcp_transport_security_common_proto_goTypes = nil + file_grpc_gcp_transport_security_common_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/utils.go b/vendor/google.golang.org/grpc/credentials/alts/utils.go new file mode 100644 index 00000000000..cbfd056cfb1 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/utils.go @@ -0,0 +1,70 @@ +/* + * 
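 *
 * [Editor's note — illustrative sketch, not part of the upstream patch.]
 * The helpers in this file are meant for server-side RPC handlers; the
 * names below are hypothetical:
 *
 *	func (s *server) DoWork(ctx context.Context, req *pb.Request) (*pb.Response, error) {
 *		// Reject callers whose ALTS peer identity is not on the allowlist.
 *		err := alts.ClientAuthorizationCheck(ctx, []string{"allowed-sa@example.iam.gserviceaccount.com"})
 *		if err != nil {
 *			return nil, err
 *		}
 *		// ... normal handling ...
 *	}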
+ * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package alts + +import ( + "context" + "errors" + "strings" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" +) + +// AuthInfoFromContext extracts the alts.AuthInfo object from the given context, +// if it exists. This API should be used by gRPC server RPC handlers to get +// information about the communicating peer. For client-side, use grpc.Peer() +// CallOption. +func AuthInfoFromContext(ctx context.Context) (AuthInfo, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return nil, errors.New("no Peer found in Context") + } + return AuthInfoFromPeer(p) +} + +// AuthInfoFromPeer extracts the alts.AuthInfo object from the given peer, if it +// exists. This API should be used by gRPC clients after obtaining a peer object +// using the grpc.Peer() CallOption. +func AuthInfoFromPeer(p *peer.Peer) (AuthInfo, error) { + altsAuthInfo, ok := p.AuthInfo.(AuthInfo) + if !ok { + return nil, errors.New("no alts.AuthInfo found in Peer") + } + return altsAuthInfo, nil +} + +// ClientAuthorizationCheck checks whether the client is authorized to access +// the requested resources based on the given expected client service accounts. +// This API should be used by gRPC server RPC handlers. This API should not be +// used by clients. +func ClientAuthorizationCheck(ctx context.Context, expectedServiceAccounts []string) error { + authInfo, err := AuthInfoFromContext(ctx) + if err != nil { + return status.Errorf(codes.PermissionDenied, "The context is not an ALTS-compatible context: %v", err) + } + peer := authInfo.PeerServiceAccount() + for _, sa := range expectedServiceAccounts { + if strings.EqualFold(peer, sa) { + return nil + } + } + return status.Errorf(codes.PermissionDenied, "Client %v is not authorized", peer) +} diff --git a/vendor/google.golang.org/grpc/credentials/google/google.go b/vendor/google.golang.org/grpc/credentials/google/google.go new file mode 100644 index 00000000000..fbdf7dc2997 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/google/google.go @@ -0,0 +1,145 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package google defines credentials for google cloud services. 
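// [Editor's note — illustrative sketch, not part of the upstream patch.]
// The bundle constructors below are passed directly to grpc.Dial; for a
// Google API endpoint this looks roughly like:
//
//	creds := google.NewDefaultCredentials()
//	conn, err := grpc.Dial("pubsub.googleapis.com:443",
//		grpc.WithCredentialsBundle(creds))
//	if err != nil {
//		return err
//	}
//	defer conn.Close()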
+package google + +import ( + "context" + "fmt" + "time" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/alts" + "google.golang.org/grpc/credentials/oauth" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" +) + +const tokenRequestTimeout = 30 * time.Second + +var logger = grpclog.Component("credentials") + +// DefaultCredentialsOptions constructs options to build DefaultCredentials. +type DefaultCredentialsOptions struct { + // PerRPCCreds is a per RPC credentials that is passed to a bundle. + PerRPCCreds credentials.PerRPCCredentials +} + +// NewDefaultCredentialsWithOptions returns a credentials bundle that is +// configured to work with google services. +// +// This API is experimental. +func NewDefaultCredentialsWithOptions(opts DefaultCredentialsOptions) credentials.Bundle { + if opts.PerRPCCreds == nil { + ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout) + defer cancel() + var err error + opts.PerRPCCreds, err = newADC(ctx) + if err != nil { + logger.Warningf("NewDefaultCredentialsWithOptions: failed to create application oauth: %v", err) + } + } + c := &creds{opts: opts} + bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) + if err != nil { + logger.Warningf("NewDefaultCredentialsWithOptions: failed to create new creds: %v", err) + } + return bundle +} + +// NewDefaultCredentials returns a credentials bundle that is configured to work +// with google services. +// +// This API is experimental. +func NewDefaultCredentials() credentials.Bundle { + return NewDefaultCredentialsWithOptions(DefaultCredentialsOptions{}) +} + +// NewComputeEngineCredentials returns a credentials bundle that is configured to work +// with google services. This API must only be used when running on GCE. Authentication configured +// by this API represents the GCE VM's default service account. +// +// This API is experimental. +func NewComputeEngineCredentials() credentials.Bundle { + return NewDefaultCredentialsWithOptions(DefaultCredentialsOptions{ + PerRPCCreds: oauth.NewComputeEngine(), + }) +} + +// creds implements credentials.Bundle. +type creds struct { + opts DefaultCredentialsOptions + + // Supported modes are defined in internal/internal.go. + mode string + // The active transport credentials associated with this bundle. + transportCreds credentials.TransportCredentials + // The active per RPC credentials associated with this bundle. + perRPCCreds credentials.PerRPCCredentials +} + +func (c *creds) TransportCredentials() credentials.TransportCredentials { + return c.transportCreds +} + +func (c *creds) PerRPCCredentials() credentials.PerRPCCredentials { + if c == nil { + return nil + } + return c.perRPCCreds +} + +var ( + newTLS = func() credentials.TransportCredentials { + return credentials.NewTLS(nil) + } + newALTS = func() credentials.TransportCredentials { + return alts.NewClientCreds(alts.DefaultClientOptions()) + } + newADC = func(ctx context.Context) (credentials.PerRPCCredentials, error) { + return oauth.NewApplicationDefault(ctx) + } +) + +// NewWithMode should make a copy of Bundle, and switch mode. Modifying the +// existing Bundle may cause races. +func (c *creds) NewWithMode(mode string) (credentials.Bundle, error) { + newCreds := &creds{ + opts: c.opts, + mode: mode, + } + + // Create transport credentials. 
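	// [Editor's note, illustrative] In fallback mode the channel may reach
	// either a frontend (TLS) or a direct backend (ALTS), so a combined
	// credential picks per address; the balancer-related modes only dial
	// backends, so plain ALTS is enough.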
+ switch mode { + case internal.CredsBundleModeFallback: + newCreds.transportCreds = newClusterTransportCreds(newTLS(), newALTS()) + case internal.CredsBundleModeBackendFromBalancer, internal.CredsBundleModeBalancer: + // Only the clients can use google default credentials, so we only need + // to create new ALTS client creds here. + newCreds.transportCreds = newALTS() + default: + return nil, fmt.Errorf("unsupported mode: %v", mode) + } + + if mode == internal.CredsBundleModeFallback || mode == internal.CredsBundleModeBackendFromBalancer { + newCreds.perRPCCreds = newCreds.opts.PerRPCCreds + } + + return newCreds, nil +} diff --git a/vendor/google.golang.org/grpc/credentials/google/xds.go b/vendor/google.golang.org/grpc/credentials/google/xds.go new file mode 100644 index 00000000000..2c5c8b9eee1 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/google/xds.go @@ -0,0 +1,128 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package google + +import ( + "context" + "net" + "net/url" + "strings" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" +) + +const cfeClusterNamePrefix = "google_cfe_" +const cfeClusterResourceNamePrefix = "/envoy.config.cluster.v3.Cluster/google_cfe_" +const cfeClusterAuthorityName = "traffic-director-c2p.xds.googleapis.com" + +// clusterTransportCreds is a combo of TLS + ALTS. +// +// On the client, ClientHandshake picks TLS or ALTS based on address attributes. +// - if attributes has cluster name +// - if cluster name has prefix "google_cfe_", or +// "xdstp://traffic-director-c2p.xds.googleapis.com/envoy.config.cluster.v3.Cluster/google_cfe_", +// use TLS +// - otherwise, use ALTS +// +// - else, do TLS +// +// On the server, ServerHandshake always does TLS. +type clusterTransportCreds struct { + tls credentials.TransportCredentials + alts credentials.TransportCredentials +} + +func newClusterTransportCreds(tls, alts credentials.TransportCredentials) *clusterTransportCreds { + return &clusterTransportCreds{ + tls: tls, + alts: alts, + } +} + +// clusterName returns the xDS cluster name stored in the attributes in the +// context. +func clusterName(ctx context.Context) string { + chi := credentials.ClientHandshakeInfoFromContext(ctx) + if chi.Attributes == nil { + return "" + } + cluster, _ := internal.GetXDSHandshakeClusterName(chi.Attributes) + return cluster +} + +// isDirectPathCluster returns true if the cluster in the context is a +// directpath cluster, meaning ALTS should be used. +func isDirectPathCluster(ctx context.Context) bool { + cluster := clusterName(ctx) + if cluster == "" { + // No cluster; not xDS; use TLS. + return false + } + if strings.HasPrefix(cluster, cfeClusterNamePrefix) { + // xDS cluster prefixed by "google_cfe_"; use TLS. + return false + } + if !strings.HasPrefix(cluster, "xdstp:") { + // Other xDS cluster name; use ALTS. + return true + } + u, err := url.Parse(cluster) + if err != nil { + // Shouldn't happen, but assume ALTS. 
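		// [Editor's note, illustrative] Sample outcomes of this function
		// (non-CFE names are hypothetical):
		//   ""                 -> TLS (no xDS cluster attribute)
		//   "google_cfe_foo"   -> TLS (CFE prefix)
		//   "example_backend"  -> ALTS (non-xdstp cluster name)
		//   "xdstp://..." URIs -> TLS only when host and path match the
		//                         C2P CFE authority and resource prefix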
+ return true + } + // If authority AND path match our CFE checks, use TLS; otherwise use ALTS. + return u.Host != cfeClusterAuthorityName || !strings.HasPrefix(u.Path, cfeClusterResourceNamePrefix) +} + +func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if isDirectPathCluster(ctx) { + // If attributes have cluster name, and cluster name is not cfe, it's a + // backend address, use ALTS. + return c.alts.ClientHandshake(ctx, authority, rawConn) + } + return c.tls.ClientHandshake(ctx, authority, rawConn) +} + +func (c *clusterTransportCreds) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return c.tls.ServerHandshake(conn) +} + +func (c *clusterTransportCreds) Info() credentials.ProtocolInfo { + // TODO: this always returns tls.Info now, because we don't have a cluster + // name to check when this method is called. This method doesn't affect + // anything important now. We may want to revisit this if it becomes more + // important later. + return c.tls.Info() +} + +func (c *clusterTransportCreds) Clone() credentials.TransportCredentials { + return &clusterTransportCreds{ + tls: c.tls.Clone(), + alts: c.alts.Clone(), + } +} + +func (c *clusterTransportCreds) OverrideServerName(s string) error { + if err := c.tls.OverrideServerName(s); err != nil { + return err + } + return c.alts.OverrideServerName(s) +} diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go new file mode 100644 index 00000000000..c748fd21ce2 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go @@ -0,0 +1,242 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package oauth implements gRPC credentials using OAuth. +package oauth + +import ( + "context" + "fmt" + "io/ioutil" + "net/url" + "sync" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" + "google.golang.org/grpc/credentials" +) + +// TokenSource supplies PerRPCCredentials from an oauth2.TokenSource. +type TokenSource struct { + oauth2.TokenSource +} + +// GetRequestMetadata gets the request metadata as a map from a TokenSource. +func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + token, err := ts.Token() + if err != nil { + return nil, err + } + ri, _ := credentials.RequestInfoFromContext(ctx) + if err = credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { + return nil, fmt.Errorf("unable to transfer TokenSource PerRPCCredentials: %v", err) + } + return map[string]string{ + "authorization": token.Type() + " " + token.AccessToken, + }, nil +} + +// RequireTransportSecurity indicates whether the credentials requires transport security. 
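// [Editor's note — illustrative sketch, not part of the upstream patch.]
// Because this returns true, a TokenSource must ride on a secure channel;
// wiring it up looks roughly like (ts is some oauth2.TokenSource):
//
//	conn, err := grpc.Dial(target,
//		grpc.WithTransportCredentials(credentials.NewTLS(nil)),
//		grpc.WithPerRPCCredentials(oauth.TokenSource{TokenSource: ts}))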
+func (ts TokenSource) RequireTransportSecurity() bool { + return true +} + +// removeServiceNameFromJWTURI removes RPC service name from URI. +func removeServiceNameFromJWTURI(uri string) (string, error) { + parsed, err := url.Parse(uri) + if err != nil { + return "", err + } + parsed.Path = "/" + return parsed.String(), nil +} + +type jwtAccess struct { + jsonKey []byte +} + +// NewJWTAccessFromFile creates PerRPCCredentials from the given keyFile. +func NewJWTAccessFromFile(keyFile string) (credentials.PerRPCCredentials, error) { + jsonKey, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) + } + return NewJWTAccessFromKey(jsonKey) +} + +// NewJWTAccessFromKey creates PerRPCCredentials from the given jsonKey. +func NewJWTAccessFromKey(jsonKey []byte) (credentials.PerRPCCredentials, error) { + return jwtAccess{jsonKey}, nil +} + +func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + // Remove RPC service name from URI that will be used as audience + // in a self-signed JWT token. It follows https://google.aip.dev/auth/4111. + aud, err := removeServiceNameFromJWTURI(uri[0]) + if err != nil { + return nil, err + } + // TODO: the returned TokenSource is reusable. Store it in a sync.Map, with + // uri as the key, to avoid recreating for every RPC. + ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, aud) + if err != nil { + return nil, err + } + token, err := ts.Token() + if err != nil { + return nil, err + } + ri, _ := credentials.RequestInfoFromContext(ctx) + if err = credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { + return nil, fmt.Errorf("unable to transfer jwtAccess PerRPCCredentials: %v", err) + } + return map[string]string{ + "authorization": token.Type() + " " + token.AccessToken, + }, nil +} + +func (j jwtAccess) RequireTransportSecurity() bool { + return true +} + +// oauthAccess supplies PerRPCCredentials from a given token. +type oauthAccess struct { + token oauth2.Token +} + +// NewOauthAccess constructs the PerRPCCredentials using a given token. +func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials { + return oauthAccess{token: *token} +} + +func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + ri, _ := credentials.RequestInfoFromContext(ctx) + if err := credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { + return nil, fmt.Errorf("unable to transfer oauthAccess PerRPCCredentials: %v", err) + } + return map[string]string{ + "authorization": oa.token.Type() + " " + oa.token.AccessToken, + }, nil +} + +func (oa oauthAccess) RequireTransportSecurity() bool { + return true +} + +// NewComputeEngine constructs the PerRPCCredentials that fetches access tokens from +// Google Compute Engine (GCE)'s metadata server. It is only valid to use this +// if your program is running on a GCE instance. +// TODO(dsymonds): Deprecate and remove this. +func NewComputeEngine() credentials.PerRPCCredentials { + return TokenSource{google.ComputeTokenSource("")} +} + +// serviceAccount represents PerRPCCredentials via JWT signing key. 
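// [Editor's note, illustrative] Unlike jwtAccess above, this type caches the
// fetched token under a mutex and refreshes it only after it expires, so
// concurrent RPCs share one token instead of minting one per call.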
+type serviceAccount struct { + mu sync.Mutex + config *jwt.Config + t *oauth2.Token +} + +func (s *serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + s.mu.Lock() + defer s.mu.Unlock() + if !s.t.Valid() { + var err error + s.t, err = s.config.TokenSource(ctx).Token() + if err != nil { + return nil, err + } + } + ri, _ := credentials.RequestInfoFromContext(ctx) + if err := credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { + return nil, fmt.Errorf("unable to transfer serviceAccount PerRPCCredentials: %v", err) + } + return map[string]string{ + "authorization": s.t.Type() + " " + s.t.AccessToken, + }, nil +} + +func (s *serviceAccount) RequireTransportSecurity() bool { + return true +} + +// NewServiceAccountFromKey constructs the PerRPCCredentials using the JSON key slice +// from a Google Developers service account. +func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerRPCCredentials, error) { + config, err := google.JWTConfigFromJSON(jsonKey, scope...) + if err != nil { + return nil, err + } + return &serviceAccount{config: config}, nil +} + +// NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file +// of a Google Developers service account. +func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.PerRPCCredentials, error) { + jsonKey, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) + } + return NewServiceAccountFromKey(jsonKey, scope...) +} + +// NewApplicationDefault returns "Application Default Credentials". For more +// detail, see https://developers.google.com/accounts/docs/application-default-credentials. +func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.PerRPCCredentials, error) { + creds, err := google.FindDefaultCredentials(ctx, scope...) + if err != nil { + return nil, err + } + + // If JSON is nil, the authentication is provided by the environment and not + // with a credentials file, e.g. when code is running on Google Cloud + // Platform. Use the returned token source. + if creds.JSON == nil { + return TokenSource{creds.TokenSource}, nil + } + + // If auth is provided by env variable or creds file, the behavior will be + // different based on whether scope is set. Because the returned + // creds.TokenSource does oauth with jwt by default, and it requires scope. + // We can only use it if scope is not empty, otherwise it will fail with + // missing scope error. + // + // If scope is set, use it, it should just work. + // + // If scope is not set, we try to use jwt directly without oauth (this only + // works if it's a service account). + + if len(scope) != 0 { + return TokenSource{creds.TokenSource}, nil + } + + // Try to convert JSON to a jwt config without setting the optional scope + // parameter to check if it's a service account (the function errors if it's + // not). This is necessary because the returned config doesn't show the type + // of the account. + if _, err := google.JWTConfigFromJSON(creds.JSON); err != nil { + // If this fails, it's not a service account, return the original + // TokenSource from above. + return TokenSource{creds.TokenSource}, nil + } + + // If it's a service account, create a JWT only access with the key. 
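	// [Editor's note, illustrative] Summary of the decision tree above:
	//   creds.JSON == nil              -> env-provided token source (e.g. GCE)
	//   scope given                    -> OAuth token source with that scope
	//   service-account JSON, no scope -> self-signed JWT access (returned below)
	//   other JSON, no scope           -> fall back to the OAuth token source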
+ return NewJWTAccessFromKey(creds.JSON) +} diff --git a/vendor/google.golang.org/grpc/health/client.go b/vendor/google.golang.org/grpc/health/client.go new file mode 100644 index 00000000000..b5bee483802 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/client.go @@ -0,0 +1,117 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package health + +import ( + "context" + "fmt" + "io" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/status" +) + +var ( + backoffStrategy = backoff.DefaultExponential + backoffFunc = func(ctx context.Context, retries int) bool { + d := backoffStrategy.Backoff(retries) + timer := time.NewTimer(d) + select { + case <-timer.C: + return true + case <-ctx.Done(): + timer.Stop() + return false + } + } +) + +func init() { + internal.HealthCheckFunc = clientHealthCheck +} + +const healthCheckMethod = "/grpc.health.v1.Health/Watch" + +// This function implements the protocol defined at: +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md +func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), service string) error { + tryCnt := 0 + +retryConnection: + for { + // Backs off if the connection has failed in some way without receiving a message in the previous retry. + if tryCnt > 0 && !backoffFunc(ctx, tryCnt-1) { + return nil + } + tryCnt++ + + if ctx.Err() != nil { + return nil + } + setConnectivityState(connectivity.Connecting, nil) + rawS, err := newStream(healthCheckMethod) + if err != nil { + continue retryConnection + } + + s, ok := rawS.(grpc.ClientStream) + // Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes. + if !ok { + setConnectivityState(connectivity.Ready, nil) + return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS) + } + + if err = s.SendMsg(&healthpb.HealthCheckRequest{Service: service}); err != nil && err != io.EOF { + // Stream should have been closed, so we can safely continue to create a new stream. + continue retryConnection + } + s.CloseSend() + + resp := new(healthpb.HealthCheckResponse) + for { + err = s.RecvMsg(resp) + + // Reports healthy for the LBing purposes if health check is not implemented in the server. + if status.Code(err) == codes.Unimplemented { + setConnectivityState(connectivity.Ready, nil) + return err + } + + // Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED. 
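			// [Editor's note — illustrative, not part of the upstream patch.]
			// This checker is not called directly; a client opts in via a
			// blank import of google.golang.org/grpc/health (whose init wires
			// up internal.HealthCheckFunc) plus a service config, roughly:
			//
			//	grpc.WithDefaultServiceConfig(`{
			//		"loadBalancingConfig": [{"round_robin":{}}],
			//		"healthCheckConfig": {"serviceName": ""}
			//	}`)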
+ if err != nil { + setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but received health check RPC error: %v", err)) + continue retryConnection + } + + // As a message has been received, removes the need for backoff for the next retry by resetting the try count. + tryCnt = 0 + if resp.Status == healthpb.HealthCheckResponse_SERVING { + setConnectivityState(connectivity.Ready, nil) + } else { + setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but health check failed. status=%s", resp.Status)) + } + } + } +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go new file mode 100644 index 00000000000..a66024d23e3 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -0,0 +1,313 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.14.0 +// source: grpc/health/v1/health.proto + +package grpc_health_v1 + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 + HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3 // Used only by the Watch method. +) + +// Enum value maps for HealthCheckResponse_ServingStatus. 
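// [Editor's note — illustrative sketch, not part of the upstream patch.]
// These generated types are normally used through the server implementation
// in google.golang.org/grpc/health, e.g.:
//
//	hs := health.NewServer()
//	healthpb.RegisterHealthServer(grpcServer, hs)
//	hs.SetServingStatus("my.package.MyService", healthpb.HealthCheckResponse_SERVING)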
+var ( + HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", + 3: "SERVICE_UNKNOWN", + } + HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, + "SERVICE_UNKNOWN": 3, + } +) + +func (x HealthCheckResponse_ServingStatus) Enum() *HealthCheckResponse_ServingStatus { + p := new(HealthCheckResponse_ServingStatus) + *p = x + return p +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HealthCheckResponse_ServingStatus) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_health_v1_health_proto_enumTypes[0].Descriptor() +} + +func (HealthCheckResponse_ServingStatus) Type() protoreflect.EnumType { + return &file_grpc_health_v1_health_proto_enumTypes[0] +} + +func (x HealthCheckResponse_ServingStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HealthCheckResponse_ServingStatus.Descriptor instead. +func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1, 0} +} + +type HealthCheckRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` +} + +func (x *HealthCheckRequest) Reset() { + *x = HealthCheckRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_health_v1_health_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheckRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheckRequest) ProtoMessage() {} + +func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_health_v1_health_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheckRequest.ProtoReflect.Descriptor instead. 
+func (*HealthCheckRequest) Descriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{0} +} + +func (x *HealthCheckRequest) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +type HealthCheckResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` +} + +func (x *HealthCheckResponse) Reset() { + *x = HealthCheckResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_health_v1_health_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheckResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheckResponse) ProtoMessage() {} + +func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_health_v1_health_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheckResponse.ProtoReflect.Descriptor instead. +func (*HealthCheckResponse) Descriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1} +} + +func (x *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { + if x != nil { + return x.Status + } + return HealthCheckResponse_UNKNOWN +} + +var File_grpc_health_v1_health_proto protoreflect.FileDescriptor + +var file_grpc_health_v1_health_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x76, 0x31, + 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x22, 0x2e, 0x0a, + 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0xb1, 0x01, + 0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x22, 0x4f, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, + 0x0a, 0x07, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, + 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, + 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x03, 0x32, 0xae, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05, + 0x43, 0x68, 0x65, 0x63, 
0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, + 0x0a, 0x05, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x30, 0x01, 0x42, 0x61, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x5f, 0x76, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_health_v1_health_proto_rawDescOnce sync.Once + file_grpc_health_v1_health_proto_rawDescData = file_grpc_health_v1_health_proto_rawDesc +) + +func file_grpc_health_v1_health_proto_rawDescGZIP() []byte { + file_grpc_health_v1_health_proto_rawDescOnce.Do(func() { + file_grpc_health_v1_health_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_health_v1_health_proto_rawDescData) + }) + return file_grpc_health_v1_health_proto_rawDescData +} + +var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_grpc_health_v1_health_proto_goTypes = []interface{}{ + (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus + (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest + (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse +} +var file_grpc_health_v1_health_proto_depIdxs = []int32{ + 0, // 0: grpc.health.v1.HealthCheckResponse.status:type_name -> grpc.health.v1.HealthCheckResponse.ServingStatus + 1, // 1: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest + 1, // 2: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest + 2, // 3: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse + 2, // 4: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse + 3, // [3:5] is the sub-list for method output_type + 1, // [1:3] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_grpc_health_v1_health_proto_init() } +func file_grpc_health_v1_health_proto_init() { + if File_grpc_health_v1_health_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheckRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheckResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_health_v1_health_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_health_v1_health_proto_goTypes, + DependencyIndexes: file_grpc_health_v1_health_proto_depIdxs, + EnumInfos: file_grpc_health_v1_health_proto_enumTypes, + MessageInfos: file_grpc_health_v1_health_proto_msgTypes, + }.Build() + File_grpc_health_v1_health_proto = out.File + file_grpc_health_v1_health_proto_rawDesc = nil + file_grpc_health_v1_health_proto_goTypes = nil + file_grpc_health_v1_health_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go new file mode 100644 index 00000000000..a332dfd7b54 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -0,0 +1,218 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.14.0 +// source: grpc/health/v1/health.proto + +package grpc_health_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// HealthClient is the client API for Health service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type HealthClient interface { + // If the requested service is unknown, the call will fail with status + // NOT_FOUND. + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current + // serving status. 
It will then subsequently send a new message whenever + // the service's serving status changes. + // + // If the requested service is unknown when the call is received, the + // server will send a message setting the serving status to + // SERVICE_UNKNOWN but will *not* terminate the call. If at some + // future point, the serving status of the service becomes known, the + // server will send a new message with the service's serving status. + // + // If the call terminates with status UNIMPLEMENTED, then clients + // should assume this method is not supported and should not retry the + // call. If the call terminates with any other status (including OK), + // clients should retry the call with appropriate exponential backoff. + Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) +} + +type healthClient struct { + cc grpc.ClientConnInterface +} + +func NewHealthClient(cc grpc.ClientConnInterface) HealthClient { + return &healthClient{cc} +} + +func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { + stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...) + if err != nil { + return nil, err + } + x := &healthWatchClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Health_WatchClient interface { + Recv() (*HealthCheckResponse, error) + grpc.ClientStream +} + +type healthWatchClient struct { + grpc.ClientStream +} + +func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { + m := new(HealthCheckResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// HealthServer is the server API for Health service. +// All implementations should embed UnimplementedHealthServer +// for forward compatibility +type HealthServer interface { + // If the requested service is unknown, the call will fail with status + // NOT_FOUND. + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current + // serving status. It will then subsequently send a new message whenever + // the service's serving status changes. + // + // If the requested service is unknown when the call is received, the + // server will send a message setting the serving status to + // SERVICE_UNKNOWN but will *not* terminate the call. If at some + // future point, the serving status of the service becomes known, the + // server will send a new message with the service's serving status. + // + // If the call terminates with status UNIMPLEMENTED, then clients + // should assume this method is not supported and should not retry the + // call. If the call terminates with any other status (including OK), + // clients should retry the call with appropriate exponential backoff. + Watch(*HealthCheckRequest, Health_WatchServer) error +} + +// UnimplementedHealthServer should be embedded to have forward compatible implementations. 
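+// Embedding it gives an implementation a default "unimplemented" handler for
+// any method later added to HealthServer, instead of a compile error.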
+type UnimplementedHealthServer struct { +} + +func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") +} +func (UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error { + return status.Errorf(codes.Unimplemented, "method Watch not implemented") +} + +// UnsafeHealthServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to HealthServer will +// result in compilation errors. +type UnsafeHealthServer interface { + mustEmbedUnimplementedHealthServer() +} + +func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) { + s.RegisterService(&Health_ServiceDesc, srv) +} + +func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.health.v1.Health/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(HealthCheckRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(HealthServer).Watch(m, &healthWatchServer{stream}) +} + +type Health_WatchServer interface { + Send(*HealthCheckResponse) error + grpc.ServerStream +} + +type healthWatchServer struct { + grpc.ServerStream +} + +func (x *healthWatchServer) Send(m *HealthCheckResponse) error { + return x.ServerStream.SendMsg(m) +} + +// Health_ServiceDesc is the grpc.ServiceDesc for Health service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Health_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.health.v1.Health", + HandlerType: (*HealthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _Health_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Watch", + Handler: _Health_Watch_Handler, + ServerStreams: true, + }, + }, + Metadata: "grpc/health/v1/health.proto", +} diff --git a/vendor/google.golang.org/grpc/health/logging.go b/vendor/google.golang.org/grpc/health/logging.go new file mode 100644 index 00000000000..83c6acf55ef --- /dev/null +++ b/vendor/google.golang.org/grpc/health/logging.go @@ -0,0 +1,23 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package health
+
+import "google.golang.org/grpc/grpclog"
+
+var logger = grpclog.Component("health_service")
diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go
new file mode 100644
index 00000000000..cce6312d77f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/health/server.go
@@ -0,0 +1,163 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package health provides a service that exposes a server's health, and it
+// must be imported to enable support for client-side health checks.
package health
+
+import (
+	"context"
+	"sync"
+
+	"google.golang.org/grpc/codes"
+	healthgrpc "google.golang.org/grpc/health/grpc_health_v1"
+	healthpb "google.golang.org/grpc/health/grpc_health_v1"
+	"google.golang.org/grpc/status"
+)
+
+// Server implements `service Health`.
+type Server struct {
+	healthgrpc.UnimplementedHealthServer
+	mu sync.RWMutex
+	// If shutdown is true, all serving statuses are expected to be NOT_SERVING,
+	// and to stay in NOT_SERVING.
+	shutdown bool
+	// statusMap stores the serving status of the services this Server monitors.
+	statusMap map[string]healthpb.HealthCheckResponse_ServingStatus
+	updates   map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus
+}
+
+// NewServer returns a new Server.
+func NewServer() *Server {
+	return &Server{
+		statusMap: map[string]healthpb.HealthCheckResponse_ServingStatus{"": healthpb.HealthCheckResponse_SERVING},
+		updates:   make(map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus),
+	}
+}
+
+// Check implements `service Health`.
+func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	if servingStatus, ok := s.statusMap[in.Service]; ok {
+		return &healthpb.HealthCheckResponse{
+			Status: servingStatus,
+		}, nil
+	}
+	return nil, status.Error(codes.NotFound, "unknown service")
+}
+
+// Watch implements `service Health`.
+func (s *Server) Watch(in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error {
+	service := in.Service
+	// update channel is used for getting service status updates.
+	update := make(chan healthpb.HealthCheckResponse_ServingStatus, 1)
+	s.mu.Lock()
+	// Puts the initial status into the channel.
+	if servingStatus, ok := s.statusMap[service]; ok {
+		update <- servingStatus
+	} else {
+		update <- healthpb.HealthCheckResponse_SERVICE_UNKNOWN
+	}
+
+	// Registers the update channel to the correct place in the updates map.
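+	// The inner map is created lazily on the first watcher of a service; the
+	// update channel is buffered (capacity 1), so setServingStatusLocked can
+	// always publish the latest status without blocking.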
+	if _, ok := s.updates[service]; !ok {
+		s.updates[service] = make(map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus)
+	}
+	s.updates[service][stream] = update
+	defer func() {
+		s.mu.Lock()
+		delete(s.updates[service], stream)
+		s.mu.Unlock()
+	}()
+	s.mu.Unlock()
+
+	var lastSentStatus healthpb.HealthCheckResponse_ServingStatus = -1
+	for {
+		select {
+		// Status updated. Sends the up-to-date status to the client.
+		case servingStatus := <-update:
+			if lastSentStatus == servingStatus {
+				continue
+			}
+			lastSentStatus = servingStatus
+			err := stream.Send(&healthpb.HealthCheckResponse{Status: servingStatus})
+			if err != nil {
+				return status.Error(codes.Canceled, "Stream has ended.")
+			}
+		// Context done. Removes the update channel from the updates map.
+		case <-stream.Context().Done():
+			return status.Error(codes.Canceled, "Stream has ended.")
+		}
+	}
+}
+
+// SetServingStatus is called when the serving status of a service needs to be
+// reset, or when a new service entry needs to be inserted into the statusMap.
+func (s *Server) SetServingStatus(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.shutdown {
+		logger.Infof("health: status changing for %s to %v is ignored because the health service is shut down", service, servingStatus)
+		return
+	}
+
+	s.setServingStatusLocked(service, servingStatus)
+}
+
+func (s *Server) setServingStatusLocked(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) {
+	s.statusMap[service] = servingStatus
+	for _, update := range s.updates[service] {
+		// Clears any previous update that has not been sent to the client from
+		// the channel. This can happen if the client is not reading and the
+		// server gets flow control limited.
+		select {
+		case <-update:
+		default:
+		}
+		// Puts the most recent update into the channel.
+		update <- servingStatus
+	}
+}
+
+// Shutdown sets all serving statuses to NOT_SERVING, and configures the server
+// to ignore all future status changes.
+//
+// This changes serving status for all services. To set the status of a
+// particular service, call SetServingStatus().
+func (s *Server) Shutdown() {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.shutdown = true
+	for service := range s.statusMap {
+		s.setServingStatusLocked(service, healthpb.HealthCheckResponse_NOT_SERVING)
+	}
+}
+
+// Resume sets all serving statuses to SERVING, and configures the server to
+// accept all future status changes.
+//
+// This changes serving status for all services. To set the status of a
+// particular service, call SetServingStatus().
+func (s *Server) Resume() {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.shutdown = false
+	for service := range s.statusMap {
+		s.setServingStatusLocked(service, healthpb.HealthCheckResponse_SERVING)
+	}
+}
diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go
new file mode 100644
index 00000000000..6717b757f80
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go
@@ -0,0 +1,72 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package googlecloud contains internal helper functions for Google Cloud.
+package googlecloud
+
+import (
+	"runtime"
+	"strings"
+	"sync"
+
+	"google.golang.org/grpc/grpclog"
+	internalgrpclog "google.golang.org/grpc/internal/grpclog"
+)
+
+const logPrefix = "[googlecloud]"
+
+var (
+	vmOnGCEOnce sync.Once
+	vmOnGCE     bool
+
+	logger = internalgrpclog.NewPrefixLogger(grpclog.Component("googlecloud"), logPrefix)
+)
+
+// OnGCE returns whether the client is running on GCE.
+//
+// It provides similar functionality as metadata.OnGCE from the cloud library
+// package. We keep this to avoid depending on the cloud library module.
+func OnGCE() bool {
+	vmOnGCEOnce.Do(func() {
+		mf, err := manufacturer()
+		if err != nil {
+			logger.Infof("failed to read manufacturer, setting onGCE=false: %v", err)
+			return
+		}
+		vmOnGCE = isRunningOnGCE(mf, runtime.GOOS)
+	})
+	return vmOnGCE
+}
+
+// isRunningOnGCE checks, without doing a network request, whether the local
+// system is running on GCP.
+func isRunningOnGCE(manufacturer []byte, goos string) bool {
+	name := string(manufacturer)
+	switch goos {
+	case "linux":
+		name = strings.TrimSpace(name)
+		return name == "Google" || name == "Google Compute Engine"
+	case "windows":
+		name = strings.Replace(name, " ", "", -1)
+		name = strings.Replace(name, "\n", "", -1)
+		name = strings.Replace(name, "\r", "", -1)
+		return name == "Google"
+	default:
+		return false
+	}
+}
diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go
new file mode 100644
index 00000000000..ffa0f1ddee5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go
@@ -0,0 +1,26 @@
+//go:build !(linux || windows)
+// +build !linux,!windows
+
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package googlecloud
+
+func manufacturer() ([]byte, error) {
+	return nil, nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go
new file mode 100644
index 00000000000..e53b8ffc837
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go
@@ -0,0 +1,27 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package googlecloud
+
+import "io/ioutil"
+
+const linuxProductNameFile = "/sys/class/dmi/id/product_name"
+
+func manufacturer() ([]byte, error) {
+	return ioutil.ReadFile(linuxProductNameFile)
+}
diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go
new file mode 100644
index 00000000000..2d7aaaaa70f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go
@@ -0,0 +1,50 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package googlecloud
+
+import (
+	"errors"
+	"os/exec"
+	"regexp"
+	"strings"
+)
+
+const (
+	windowsCheckCommand      = "powershell.exe"
+	windowsCheckCommandArgs  = "Get-WmiObject -Class Win32_BIOS"
+	powershellOutputFilter   = "Manufacturer"
+	windowsManufacturerRegex = ":(.*)"
+)
+
+func manufacturer() ([]byte, error) {
+	cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs)
+	out, err := cmd.Output()
+	if err != nil {
+		return nil, err
+	}
+	for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") {
+		if strings.HasPrefix(line, powershellOutputFilter) {
+			re := regexp.MustCompile(windowsManufacturerRegex)
+			name := re.FindString(line)
+			name = strings.TrimLeft(name, ":")
+			return []byte(name), nil
+		}
+	}
+	return nil, errors.New("cannot determine the machine's manufacturer")
+}
diff --git a/vendor/google.golang.org/grpc/reflection/README.md b/vendor/google.golang.org/grpc/reflection/README.md
new file mode 100644
index 00000000000..04b6371afcb
--- /dev/null
+++ b/vendor/google.golang.org/grpc/reflection/README.md
@@ -0,0 +1,18 @@
+# Reflection
+
+Package reflection implements the gRPC server reflection service.
+
+The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto.
+
+To register server reflection on a gRPC server:
+```go
+import "google.golang.org/grpc/reflection"
+
+s := grpc.NewServer()
+pb.RegisterYourOwnServer(s, &server{})
+
+// Register reflection service on gRPC server.
+reflection.Register(s)
+
+s.Serve(lis)
+```
diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go
new file mode 100644
index 00000000000..c22f9a52db4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go
@@ -0,0 +1,955 @@
+// Copyright 2016 gRPC authors.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.14.0 +// source: reflection/grpc_reflection_v1alpha/reflection.proto + +package grpc_reflection_v1alpha + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// The message sent by the client when calling ServerReflectionInfo method. +type ServerReflectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. + // + // Types that are assignable to MessageRequest: + // + // *ServerReflectionRequest_FileByFilename + // *ServerReflectionRequest_FileContainingSymbol + // *ServerReflectionRequest_FileContainingExtension + // *ServerReflectionRequest_AllExtensionNumbersOfType + // *ServerReflectionRequest_ListServices + MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` +} + +func (x *ServerReflectionRequest) Reset() { + *x = ServerReflectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionRequest) ProtoMessage() {} + +func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. 
+func (*ServerReflectionRequest) Descriptor() ([]byte, []int) {
+	return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ServerReflectionRequest) GetHost() string {
+	if x != nil {
+		return x.Host
+	}
+	return ""
+}
+
+func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest {
+	if m != nil {
+		return m.MessageRequest
+	}
+	return nil
+}
+
+func (x *ServerReflectionRequest) GetFileByFilename() string {
+	if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok {
+		return x.FileByFilename
+	}
+	return ""
+}
+
+func (x *ServerReflectionRequest) GetFileContainingSymbol() string {
+	if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok {
+		return x.FileContainingSymbol
+	}
+	return ""
+}
+
+func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest {
+	if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok {
+		return x.FileContainingExtension
+	}
+	return nil
+}
+
+func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string {
+	if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok {
+		return x.AllExtensionNumbersOfType
+	}
+	return ""
+}
+
+func (x *ServerReflectionRequest) GetListServices() string {
+	if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok {
+		return x.ListServices
+	}
+	return ""
+}
+
+type isServerReflectionRequest_MessageRequest interface {
+	isServerReflectionRequest_MessageRequest()
+}
+
+type ServerReflectionRequest_FileByFilename struct {
+	// Find a proto file by the file name.
+	FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"`
+}
+
+type ServerReflectionRequest_FileContainingSymbol struct {
+	// Find the proto file that declares the given fully-qualified symbol name.
+	// This field should be a fully-qualified symbol name
+	// (e.g. <package>.<service>[.<method>] or <package>.<type>).
+	FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"`
+}
+
+type ServerReflectionRequest_FileContainingExtension struct {
+	// Find the proto file which defines an extension extending the given
+	// message type with the given field number.
+	FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"`
+}
+
+type ServerReflectionRequest_AllExtensionNumbersOfType struct {
+	// Finds the tag numbers used by all known extensions of extendee_type, and
+	// appends them to ExtensionNumberResponse in an undefined order.
+	// Its corresponding method is best-effort: it's not guaranteed that the
+	// reflection service will implement this method, and it's not guaranteed
+	// that this method will provide all extensions. Returns
+	// StatusCode::UNIMPLEMENTED if it's not implemented.
+	// This field should be a fully-qualified type name. The format is
+	// <package>.<type>
+	AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"`
+}
+
+type ServerReflectionRequest_ListServices struct {
+	// List the full names of registered services. The content will not be
+	// checked.
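+	// Setting this field at all selects the list_services request; the server
+	// ignores its value.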
+	ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"`
+}
+
+func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {}
+
+func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {}
+
+func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {}
+
+func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() {
+}
+
+func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {}
+
+// The type name and extension number sent by the client when requesting
+// file_containing_extension.
+type ExtensionRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Fully-qualified type name. The format should be <package>.<type>
+	ContainingType  string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"`
+	ExtensionNumber int32  `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"`
+}
+
+func (x *ExtensionRequest) Reset() {
+	*x = ExtensionRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExtensionRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExtensionRequest) ProtoMessage() {}
+
+func (x *ExtensionRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead.
+func (*ExtensionRequest) Descriptor() ([]byte, []int) {
+	return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ExtensionRequest) GetContainingType() string {
+	if x != nil {
+		return x.ContainingType
+	}
+	return ""
+}
+
+func (x *ExtensionRequest) GetExtensionNumber() int32 {
+	if x != nil {
+		return x.ExtensionNumber
+	}
+	return 0
+}
+
+// The message sent by the server to answer the ServerReflectionInfo method.
+type ServerReflectionResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	ValidHost       string                   `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"`
+	OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"`
+	// The server sets one of the following fields according to the
+	// message_request in the request.
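+	// Exactly one of the variants below is populated, mirroring the oneof
+	// declared in the reflection proto.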
+ // + // Types that are assignable to MessageResponse: + // + // *ServerReflectionResponse_FileDescriptorResponse + // *ServerReflectionResponse_AllExtensionNumbersResponse + // *ServerReflectionResponse_ListServicesResponse + // *ServerReflectionResponse_ErrorResponse + MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` +} + +func (x *ServerReflectionResponse) Reset() { + *x = ServerReflectionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionResponse) ProtoMessage() {} + +func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. +func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { + return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerReflectionResponse) GetValidHost() string { + if x != nil { + return x.ValidHost + } + return "" +} + +func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { + if x != nil { + return x.OriginalRequest + } + return nil +} + +func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if m != nil { + return m.MessageResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } + return nil +} + +type isServerReflectionResponse_MessageResponse interface { + isServerReflectionResponse_MessageResponse() +} + +type ServerReflectionResponse_FileDescriptorResponse struct { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with transitive dependencies. + // As the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. 
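+	// Clients should therefore retain descriptors they have already received
+	// when resolving transitive dependencies.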
+	FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"`
+}
+
+type ServerReflectionResponse_AllExtensionNumbersResponse struct {
+	// This message is used to answer all_extension_numbers_of_type requests.
+	AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"`
+}
+
+type ServerReflectionResponse_ListServicesResponse struct {
+	// This message is used to answer list_services requests.
+	ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"`
+}
+
+type ServerReflectionResponse_ErrorResponse struct {
+	// This message is used when an error occurs.
+	ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"`
+}
+
+func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() {
+}
+
+func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() {
+}
+
+func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {}
+
+func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {}
+
+// Serialized FileDescriptorProto messages sent by the server answering
+// a file_by_filename, file_containing_symbol, or file_containing_extension
+// request.
+type FileDescriptorResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Serialized FileDescriptorProto messages. We avoid taking a dependency on
+	// descriptor.proto, which uses proto2 only features, by making them opaque
+	// bytes instead.
+	FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"`
+}
+
+func (x *FileDescriptorResponse) Reset() {
+	*x = FileDescriptorResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *FileDescriptorResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FileDescriptorResponse) ProtoMessage() {}
+
+func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead.
+func (*FileDescriptorResponse) Descriptor() ([]byte, []int) {
+	return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte {
+	if x != nil {
+		return x.FileDescriptorProto
+	}
+	return nil
+}
+
+// A list of extension numbers sent by the server answering
+// all_extension_numbers_of_type request.
+type ExtensionNumberResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Full name of the base type, including the package name. The format
+	// is <package>.<type>
+	BaseTypeName    string  `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"`
+	ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"`
+}
+
+func (x *ExtensionNumberResponse) Reset() {
+	*x = ExtensionNumberResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExtensionNumberResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExtensionNumberResponse) ProtoMessage() {}
+
+func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead.
+func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) {
+	return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *ExtensionNumberResponse) GetBaseTypeName() string {
+	if x != nil {
+		return x.BaseTypeName
+	}
+	return ""
+}
+
+func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 {
+	if x != nil {
+		return x.ExtensionNumber
+	}
+	return nil
+}
+
+// A list of ServiceResponse sent by the server answering list_services request.
+type ListServiceResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The information of each service may be expanded in the future, so we use
+	// ServiceResponse message to encapsulate it.
+	Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"`
+}
+
+func (x *ListServiceResponse) Reset() {
+	*x = ListServiceResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ListServiceResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListServiceResponse) ProtoMessage() {}
+
+func (x *ListServiceResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead.
+func (*ListServiceResponse) Descriptor() ([]byte, []int) {
+	return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *ListServiceResponse) GetService() []*ServiceResponse {
+	if x != nil {
+		return x.Service
+	}
+	return nil
+}
+
+// The information of a single service used by ListServiceResponse to answer
+// list_services request.
+type ServiceResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Full name of a registered service, including its package name. The format
+	// is <package>.<service>
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *ServiceResponse) Reset() { + *x = ServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceResponse) ProtoMessage() {} + +func (x *ServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. +func (*ServiceResponse) Descriptor() ([]byte, []int) { + return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{6} +} + +func (x *ServiceResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The error code and error message sent by the server when an error occurs. +type ErrorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field uses the error codes defined in grpc::StatusCode. + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ErrorResponse) Reset() { + *x = ErrorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorResponse) ProtoMessage() {} + +func (x *ErrorResponse) ProtoReflect() protoreflect.Message { + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. 
+func (*ErrorResponse) Descriptor() ([]byte, []int) { + return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{7} +} + +func (x *ErrorResponse) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +func (x *ErrorResponse) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_reflection_grpc_reflection_v1alpha_reflection_proto protoreflect.FileDescriptor + +var file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc = []byte{ + 0x0a, 0x33, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x22, 0xf8, + 0x02, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, + 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x2a, + 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, + 0x42, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x79, + 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x66, 0x69, + 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6d, 0x62, + 0x6f, 0x6c, 0x12, 0x67, 0x0a, 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, + 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x1d, 0x61, + 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x66, 0x0a, 0x10, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, + 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 
0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, + 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x22, 0xc7, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, + 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x5b, 0x0a, + 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, 0x69, + 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x6b, 0x0a, 0x18, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, + 0x16, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x1e, 0x61, 0x6c, 0x6c, 0x5f, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, + 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x64, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, + 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 
0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x16, 0x46, + 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x17, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, + 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x59, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x07, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x32, 0x93, 0x01, 0x0a, + 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x7f, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x72, 
0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, + 0x30, 0x01, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescOnce sync.Once + file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData = file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc +) + +func file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte { + file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescOnce.Do(func() { + file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData) + }) + return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData +} + +var file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes = []interface{}{ + (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1alpha.ServerReflectionRequest + (*ExtensionRequest)(nil), // 1: grpc.reflection.v1alpha.ExtensionRequest + (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1alpha.ServerReflectionResponse + (*FileDescriptorResponse)(nil), // 3: grpc.reflection.v1alpha.FileDescriptorResponse + (*ExtensionNumberResponse)(nil), // 4: grpc.reflection.v1alpha.ExtensionNumberResponse + (*ListServiceResponse)(nil), // 5: grpc.reflection.v1alpha.ListServiceResponse + (*ServiceResponse)(nil), // 6: grpc.reflection.v1alpha.ServiceResponse + (*ErrorResponse)(nil), // 7: grpc.reflection.v1alpha.ErrorResponse +} +var file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{ + 1, // 0: grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1alpha.ExtensionRequest + 0, // 1: grpc.reflection.v1alpha.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1alpha.ServerReflectionRequest + 3, // 2: grpc.reflection.v1alpha.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1alpha.FileDescriptorResponse + 4, // 3: grpc.reflection.v1alpha.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1alpha.ExtensionNumberResponse + 5, // 4: grpc.reflection.v1alpha.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1alpha.ListServiceResponse + 7, // 5: grpc.reflection.v1alpha.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1alpha.ErrorResponse + 6, // 6: grpc.reflection.v1alpha.ListServiceResponse.service:type_name -> grpc.reflection.v1alpha.ServiceResponse + 0, // 7: grpc.reflection.v1alpha.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1alpha.ServerReflectionRequest + 2, // 8: grpc.reflection.v1alpha.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1alpha.ServerReflectionResponse + 8, // [8:9] is the sub-list 
for method output_type + 7, // [7:8] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_reflection_grpc_reflection_v1alpha_reflection_proto_init() } +func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { + if File_reflection_grpc_reflection_v1alpha_reflection_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionNumberResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*ServerReflectionRequest_FileByFilename)(nil), + (*ServerReflectionRequest_FileContainingSymbol)(nil), + (*ServerReflectionRequest_FileContainingExtension)(nil), + (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), + (*ServerReflectionRequest_ListServices)(nil), + } + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*ServerReflectionResponse_FileDescriptorResponse)(nil), + (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), + (*ServerReflectionResponse_ListServicesResponse)(nil), + 
(*ServerReflectionResponse_ErrorResponse)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes, + DependencyIndexes: file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs, + MessageInfos: file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes, + }.Build() + File_reflection_grpc_reflection_v1alpha_reflection_proto = out.File + file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc = nil + file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes = nil + file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto new file mode 100644 index 00000000000..ee2b82c0a5b --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto @@ -0,0 +1,138 @@ +// Copyright 2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection + +syntax = "proto3"; + +option go_package = "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"; + +package grpc.reflection.v1alpha; + +service ServerReflection { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + rpc ServerReflectionInfo(stream ServerReflectionRequest) + returns (stream ServerReflectionResponse); +} + +// The message sent by the client when calling ServerReflectionInfo method. +message ServerReflectionRequest { + string host = 1; + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. + oneof message_request { + // Find a proto file by the file name. + string file_by_filename = 3; + + // Find the proto file that declares the given fully-qualified symbol name. + // This field should be a fully-qualified symbol name + // (e.g. <package>.<service>[.<method>] or <package>.<type>). + string file_containing_symbol = 4; + + // Find the proto file which defines an extension extending the given + // message type with the given field number. + ExtensionRequest file_containing_extension = 5; + + // Finds the tag numbers used by all known extensions of extendee_type, and + // appends them to ExtensionNumberResponse in an undefined order. + // Its corresponding method is best-effort: it's not guaranteed that the + // reflection service will implement this method, and it's not guaranteed + // that this method will provide all extensions. Returns + // StatusCode::UNIMPLEMENTED if it's not implemented.
+ // This field should be a fully-qualified type name. The format is + // <package>.<type> + string all_extension_numbers_of_type = 6; + + // List the full names of registered services. The content will not be + // checked. + string list_services = 7; + } +} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +message ExtensionRequest { + // Fully-qualified type name. The format should be <package>.<type> + string containing_type = 1; + int32 extension_number = 2; +} + +// The message sent by the server to answer ServerReflectionInfo method. +message ServerReflectionResponse { + string valid_host = 1; + ServerReflectionRequest original_request = 2; + // The server sets one of the following fields according to the + // message_request in the request. + oneof message_response { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with transitive dependencies. + // As the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. + FileDescriptorResponse file_descriptor_response = 4; + + // This message is used to answer all_extension_numbers_of_type requests. + ExtensionNumberResponse all_extension_numbers_response = 5; + + // This message is used to answer list_services requests. + ListServiceResponse list_services_response = 6; + + // This message is used when an error occurs. + ErrorResponse error_response = 7; + } +} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +message FileDescriptorResponse { + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + repeated bytes file_descriptor_proto = 1; +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +message ExtensionNumberResponse { + // Full name of the base type, including the package name. The format + // is <package>.<type> + string base_type_name = 1; + repeated int32 extension_number = 2; +} + +// A list of ServiceResponse sent by the server answering list_services request. +message ListServiceResponse { + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + repeated ServiceResponse service = 1; +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request. +message ServiceResponse { + // Full name of a registered service, including its package name. The format + // is <package>.<service> + string name = 1; +} + +// The error code and error message sent by the server when an error occurs. +message ErrorResponse { + // This field uses the error codes defined in grpc::StatusCode. + int32 error_code = 1; + string error_message = 2; +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go new file mode 100644 index 00000000000..b8e76a87dca --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -0,0 +1,155 @@ +// Copyright 2016 gRPC authors.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.14.0 +// source: reflection/grpc_reflection_v1alpha/reflection.proto + +package grpc_reflection_v1alpha + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// ServerReflectionClient is the client API for ServerReflection service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ServerReflectionClient interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) +} + +type serverReflectionClient struct { + cc grpc.ClientConnInterface +} + +func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { + return &serverReflectionClient{cc} +} + +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo", opts...) + if err != nil { + return nil, err + } + x := &serverReflectionServerReflectionInfoClient{stream} + return x, nil +} + +type ServerReflection_ServerReflectionInfoClient interface { + Send(*ServerReflectionRequest) error + Recv() (*ServerReflectionResponse, error) + grpc.ClientStream +} + +type serverReflectionServerReflectionInfoClient struct { + grpc.ClientStream +} + +func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { + m := new(ServerReflectionResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflectionServer is the server API for ServerReflection service. +// All implementations should embed UnimplementedServerReflectionServer +// for forward compatibility +type ServerReflectionServer interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error +} + +// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. 
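+// Embedding it supplies a default ServerReflectionInfo that returns codes.Unimplemented, +// so embedding implementations keep compiling if methods are later added to ServerReflectionServer.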
+type UnimplementedServerReflectionServer struct { +} + +func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { + return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") +} + +// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ServerReflectionServer will +// result in compilation errors. +type UnsafeServerReflectionServer interface { + mustEmbedUnimplementedServerReflectionServer() +} + +func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { + s.RegisterService(&ServerReflection_ServiceDesc, srv) +} + +func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) +} + +type ServerReflection_ServerReflectionInfoServer interface { + Send(*ServerReflectionResponse) error + Recv() (*ServerReflectionRequest, error) + grpc.ServerStream +} + +type serverReflectionServerReflectionInfoServer struct { + grpc.ServerStream +} + +func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { + m := new(ServerReflectionRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ServerReflection_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.reflection.v1alpha.ServerReflection", + HandlerType: (*ServerReflectionServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ServerReflectionInfo", + Handler: _ServerReflection_ServerReflectionInfo_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "reflection/grpc_reflection_v1alpha/reflection.proto", +} diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go new file mode 100644 index 00000000000..0b41783aa53 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -0,0 +1,324 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* +Package reflection implements server reflection service. + +The service implemented is defined in: +https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. 
+ +To register server reflection on a gRPC server: + + import "google.golang.org/grpc/reflection" + + s := grpc.NewServer() + pb.RegisterYourOwnServer(s, &server{}) + + // Register reflection service on gRPC server. + reflection.Register(s) + + s.Serve(lis) +*/ +package reflection // import "google.golang.org/grpc/reflection" + +import ( + "io" + "sort" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// GRPCServer is the interface provided by a gRPC server. It is implemented by +// *grpc.Server, but could also be implemented by other concrete types. It acts +// as a registry, for accumulating the services exposed by the server. +type GRPCServer interface { + grpc.ServiceRegistrar + ServiceInfoProvider +} + +var _ GRPCServer = (*grpc.Server)(nil) + +// Register registers the server reflection service on the given gRPC server. +func Register(s GRPCServer) { + svr := NewServer(ServerOptions{Services: s}) + rpb.RegisterServerReflectionServer(s, svr) +} + +// ServiceInfoProvider is an interface used to retrieve metadata about the +// services to expose. +// +// The reflection service is only interested in the service names, but the +// signature is this way so that *grpc.Server implements it. So it is okay +// for a custom implementation to return zero values for the +// grpc.ServiceInfo values in the map. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServiceInfoProvider interface { + GetServiceInfo() map[string]grpc.ServiceInfo +} + +// ExtensionResolver is the interface used to query details about extensions. +// This interface is satisfied by protoregistry.GlobalTypes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ExtensionResolver interface { + protoregistry.ExtensionTypeResolver + RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) +} + +// ServerOptions represents the options used to construct a reflection server. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServerOptions struct { + // The source of advertised RPC services. If not specified, the reflection + // server will report an empty list when asked to list services. + // + // This value will typically be a *grpc.Server. But the set of advertised + // services can be customized by wrapping a *grpc.Server or using an + // alternate implementation that returns a custom set of service names. + Services ServiceInfoProvider + // Optional resolver used to load descriptors. If not specified, + // protoregistry.GlobalFiles will be used. + DescriptorResolver protodesc.Resolver + // Optional resolver used to query for known extensions. If not specified, + // protoregistry.GlobalTypes will be used. + ExtensionResolver ExtensionResolver +} + +// NewServer returns a reflection server implementation using the given options. +// This can be used to customize behavior of the reflection service. Most usages +// should prefer to use Register instead. 
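+// +// A minimal sketch of direct construction, mirroring what Register does above +// (s is the caller's *grpc.Server; pb is an assumed import alias for this +// package's generated grpc_reflection_v1alpha): +// +// svr := reflection.NewServer(reflection.ServerOptions{Services: s}) +// pb.RegisterServerReflectionServer(s, svr)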
+// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewServer(opts ServerOptions) rpb.ServerReflectionServer { + if opts.DescriptorResolver == nil { + opts.DescriptorResolver = protoregistry.GlobalFiles + } + if opts.ExtensionResolver == nil { + opts.ExtensionResolver = protoregistry.GlobalTypes + } + return &serverReflectionServer{ + s: opts.Services, + descResolver: opts.DescriptorResolver, + extResolver: opts.ExtensionResolver, + } +} + +type serverReflectionServer struct { + rpb.UnimplementedServerReflectionServer + s ServiceInfoProvider + descResolver protodesc.Resolver + extResolver ExtensionResolver +} + +// fileDescWithDependencies returns a slice of serialized fileDescriptors in +// wire format ([]byte). The fileDescriptors will include fd and all the +// transitive dependencies of fd with names not in sentFileDescriptors. +func (s *serverReflectionServer) fileDescWithDependencies(fd protoreflect.FileDescriptor, sentFileDescriptors map[string]bool) ([][]byte, error) { + var r [][]byte + queue := []protoreflect.FileDescriptor{fd} + for len(queue) > 0 { + currentfd := queue[0] + queue = queue[1:] + if sent := sentFileDescriptors[currentfd.Path()]; len(r) == 0 || !sent { + sentFileDescriptors[currentfd.Path()] = true + fdProto := protodesc.ToFileDescriptorProto(currentfd) + currentfdEncoded, err := proto.Marshal(fdProto) + if err != nil { + return nil, err + } + r = append(r, currentfdEncoded) + } + for i := 0; i < currentfd.Imports().Len(); i++ { + queue = append(queue, currentfd.Imports().Get(i)) + } + } + return r, nil +} + +// fileDescEncodingContainingSymbol finds the file descriptor containing the +// given symbol, finds all of its previously unsent transitive dependencies, +// does marshalling on them, and returns the marshalled result. The given symbol +// can be a type, a service or a method. +func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { + d, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)) + if err != nil { + return nil, err + } + return s.fileDescWithDependencies(d.ParentFile(), sentFileDescriptors) +} + +// fileDescEncodingContainingExtension finds the file descriptor containing +// given extension, finds all of its previously unsent transitive dependencies, +// does marshalling on them, and returns the marshalled result. +func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32, sentFileDescriptors map[string]bool) ([][]byte, error) { + xt, err := s.extResolver.FindExtensionByNumber(protoreflect.FullName(typeName), protoreflect.FieldNumber(extNum)) + if err != nil { + return nil, err + } + return s.fileDescWithDependencies(xt.TypeDescriptor().ParentFile(), sentFileDescriptors) +} + +// allExtensionNumbersForTypeName returns all extension numbers for the given type. 
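+// The numbers are collected from the extension resolver and returned in ascending +// order; when none are found, the name is checked against the descriptor resolver +// so an unknown type surfaces an error instead of an empty list.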
+func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) { + var numbers []int32 + s.extResolver.RangeExtensionsByMessage(protoreflect.FullName(name), func(xt protoreflect.ExtensionType) bool { + numbers = append(numbers, int32(xt.TypeDescriptor().Number())) + return true + }) + sort.Slice(numbers, func(i, j int) bool { + return numbers[i] < numbers[j] + }) + if len(numbers) == 0 { + // maybe return an error if given type name is not known + if _, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)); err != nil { + return nil, err + } + } + return numbers, nil +} + +// listServices returns the names of services this server exposes. +func (s *serverReflectionServer) listServices() []*rpb.ServiceResponse { + serviceInfo := s.s.GetServiceInfo() + resp := make([]*rpb.ServiceResponse, 0, len(serviceInfo)) + for svc := range serviceInfo { + resp = append(resp, &rpb.ServiceResponse{Name: svc}) + } + sort.Slice(resp, func(i, j int) bool { + return resp[i].Name < resp[j].Name + }) + return resp +} + +// ServerReflectionInfo is the reflection service handler. +func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflection_ServerReflectionInfoServer) error { + sentFileDescriptors := make(map[string]bool) + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + out := &rpb.ServerReflectionResponse{ + ValidHost: in.Host, + OriginalRequest: in, + } + switch req := in.MessageRequest.(type) { + case *rpb.ServerReflectionRequest_FileByFilename: + var b [][]byte + fd, err := s.descResolver.FindFileByPath(req.FileByFilename) + if err == nil { + b, err = s.fileDescWithDependencies(fd, sentFileDescriptors) + } + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, + } + } + case *rpb.ServerReflectionRequest_FileContainingSymbol: + b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, + } + } + case *rpb.ServerReflectionRequest_FileContainingExtension: + typeName := req.FileContainingExtension.ContainingType + extNum := req.FileContainingExtension.ExtensionNumber + b, err := s.fileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, + } + } + case *rpb.ServerReflectionRequest_AllExtensionNumbersOfType: + extNums, err := s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ 
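+ // Unknown extendee type: report the resolver error to the client as NOT_FOUND.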
+ ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &rpb.ExtensionNumberResponse{ + BaseTypeName: req.AllExtensionNumbersOfType, + ExtensionNumber: extNums, + }, + } + } + case *rpb.ServerReflectionRequest_ListServices: + out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &rpb.ListServiceResponse{ + Service: s.listServices(), + }, + } + default: + return status.Errorf(codes.InvalidArgument, "invalid MessageRequest: %v", in.MessageRequest) + } + + if err := stream.Send(out); err != nil { + return err + } + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index a913f5f088f..957e4c20efe 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,9 +1,18 @@ -# cloud.google.com/go/compute v1.12.1 +# cloud.google.com/go/compute v1.13.0 ## explicit; go 1.19 cloud.google.com/go/compute/internal -# cloud.google.com/go/compute/metadata v0.2.1 +# cloud.google.com/go/compute/metadata v0.2.2 ## explicit; go 1.19 cloud.google.com/go/compute/metadata +# cloud.google.com/go/iam v0.8.0 +## explicit; go 1.19 +cloud.google.com/go/iam +cloud.google.com/go/iam/apiv1/iampb +# cloud.google.com/go/kms v1.7.0 +## explicit; go 1.19 +cloud.google.com/go/kms/apiv1 +cloud.google.com/go/kms/apiv1/kmspb +cloud.google.com/go/kms/internal # code.gitea.io/sdk/gitea v0.15.1 ## explicit; go 1.13 code.gitea.io/sdk/gitea @@ -15,6 +24,8 @@ contrib.go.opencensus.io/exporter/ocagent contrib.go.opencensus.io/exporter/prometheus # github.com/Azure/azure-sdk-for-go v67.1.0+incompatible ## explicit +github.com/Azure/azure-sdk-for-go/services/keyvault/auth +github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry github.com/Azure/azure-sdk-for-go/version # github.com/Azure/go-autorest v14.2.0+incompatible @@ -36,6 +47,12 @@ github.com/Azure/go-autorest/autorest/azure/cli # github.com/Azure/go-autorest/autorest/date v0.3.0 ## explicit; go 1.12 github.com/Azure/go-autorest/autorest/date +# github.com/Azure/go-autorest/autorest/to v0.4.0 +## explicit; go 1.12 +github.com/Azure/go-autorest/autorest/to +# github.com/Azure/go-autorest/autorest/validation v0.3.1 +## explicit; go 1.12 +github.com/Azure/go-autorest/autorest/validation # github.com/Azure/go-autorest/logger v0.2.1 ## explicit; go 1.12 github.com/Azure/go-autorest/logger @@ -71,7 +88,13 @@ github.com/acomagu/bufpipe # github.com/ahmetb/gen-crd-api-reference-docs v0.3.1-0.20220720053627-e327d0730470 => github.com/tektoncd/ahmetb-gen-crd-api-reference-docs v0.3.1-0.20220729140133-6ce2d5aafcb4 ## explicit; go 1.17 github.com/ahmetb/gen-crd-api-reference-docs -# github.com/aws/aws-sdk-go-v2 v1.17.1 +# github.com/armon/go-metrics v0.4.1 +## explicit; go 1.12 +github.com/armon/go-metrics +# github.com/armon/go-radix v1.0.0 +## explicit +github.com/armon/go-radix +# github.com/aws/aws-sdk-go-v2 v1.17.2 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2 github.com/aws/aws-sdk-go-v2/aws @@ -91,10 +114,10 @@ github.com/aws/aws-sdk-go-v2/internal/sdkio github.com/aws/aws-sdk-go-v2/internal/strings 
github.com/aws/aws-sdk-go-v2/internal/sync/singleflight github.com/aws/aws-sdk-go-v2/internal/timeconv -# github.com/aws/aws-sdk-go-v2/config v1.18.3 +# github.com/aws/aws-sdk-go-v2/config v1.18.4 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/config -# github.com/aws/aws-sdk-go-v2/credentials v1.13.3 +# github.com/aws/aws-sdk-go-v2/credentials v1.13.4 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/credentials github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds @@ -103,17 +126,17 @@ github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client github.com/aws/aws-sdk-go-v2/credentials/processcreds github.com/aws/aws-sdk-go-v2/credentials/ssocreds github.com/aws/aws-sdk-go-v2/credentials/stscreds -# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19 +# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/feature/ec2/imds github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config -# github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25 +# github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/internal/configsources -# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19 +# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 -# github.com/aws/aws-sdk-go-v2/internal/ini v1.3.26 +# github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/internal/ini # github.com/aws/aws-sdk-go-v2/service/ecr v1.17.18 @@ -126,25 +149,30 @@ github.com/aws/aws-sdk-go-v2/service/ecr/types github.com/aws/aws-sdk-go-v2/service/ecrpublic github.com/aws/aws-sdk-go-v2/service/ecrpublic/internal/endpoints github.com/aws/aws-sdk-go-v2/service/ecrpublic/types -# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.19 +# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url -# github.com/aws/aws-sdk-go-v2/service/sso v1.11.25 +# github.com/aws/aws-sdk-go-v2/service/kms v1.19.2 +## explicit; go 1.15 +github.com/aws/aws-sdk-go-v2/service/kms +github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints +github.com/aws/aws-sdk-go-v2/service/kms/types +# github.com/aws/aws-sdk-go-v2/service/sso v1.11.26 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/service/sso github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sso/types -# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.8 +# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/service/ssooidc github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints github.com/aws/aws-sdk-go-v2/service/ssooidc/types -# github.com/aws/aws-sdk-go-v2/service/sts v1.17.5 +# github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/service/sts github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sts/types -# 
github.com/aws/smithy-go v1.13.4 +# github.com/aws/smithy-go v1.13.5 ## explicit; go 1.15 github.com/aws/smithy-go github.com/aws/smithy-go/auth/bearer @@ -186,6 +214,9 @@ github.com/blendle/zapdriver # github.com/bluekeyes/go-gitdiff v0.7.0 ## explicit; go 1.13 github.com/bluekeyes/go-gitdiff/gitdiff +# github.com/cenkalti/backoff/v3 v3.2.2 +## explicit; go 1.12 +github.com/cenkalti/backoff/v3 # github.com/census-instrumentation/opencensus-proto v0.3.0 ## explicit github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1 @@ -267,6 +298,9 @@ github.com/evanphx/json-patch # github.com/evanphx/json-patch/v5 v5.6.0 ## explicit; go 1.12 github.com/evanphx/json-patch/v5 +# github.com/fatih/color v1.13.0 +## explicit; go 1.13 +github.com/fatih/color # github.com/go-git/gcfg v1.5.0 ## explicit github.com/go-git/gcfg @@ -370,8 +404,12 @@ github.com/golang/protobuf/protoc-gen-go/descriptor github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration +github.com/golang/protobuf/ptypes/empty github.com/golang/protobuf/ptypes/timestamp github.com/golang/protobuf/ptypes/wrappers +# github.com/golang/snappy v0.0.4 +## explicit +github.com/golang/snappy # github.com/google/gnostic v0.6.9 ## explicit; go 1.12 github.com/google/gnostic/compiler @@ -427,6 +465,16 @@ github.com/google/gofuzz/bytesource # github.com/google/uuid v1.3.0 ## explicit github.com/google/uuid +# github.com/googleapis/enterprise-certificate-proxy v0.2.0 +## explicit; go 1.18 +github.com/googleapis/enterprise-certificate-proxy/client +github.com/googleapis/enterprise-certificate-proxy/client/util +# github.com/googleapis/gax-go/v2 v2.7.0 +## explicit; go 1.19 +github.com/googleapis/gax-go/v2 +github.com/googleapis/gax-go/v2/apierror +github.com/googleapis/gax-go/v2/apierror/internal/proto +github.com/googleapis/gax-go/v2/internal # github.com/grpc-ecosystem/grpc-gateway v1.16.0 ## explicit; go 1.14 github.com/grpc-ecosystem/grpc-gateway/internal @@ -435,9 +483,43 @@ github.com/grpc-ecosystem/grpc-gateway/utilities # github.com/hashicorp/errwrap v1.1.0 ## explicit github.com/hashicorp/errwrap +# github.com/hashicorp/go-cleanhttp v0.5.2 +## explicit; go 1.13 +github.com/hashicorp/go-cleanhttp +# github.com/hashicorp/go-hclog v1.3.1 +## explicit; go 1.13 +github.com/hashicorp/go-hclog +# github.com/hashicorp/go-immutable-radix v1.3.1 +## explicit +github.com/hashicorp/go-immutable-radix # github.com/hashicorp/go-multierror v1.1.1 ## explicit; go 1.13 github.com/hashicorp/go-multierror +# github.com/hashicorp/go-plugin v1.4.6 +## explicit; go 1.17 +github.com/hashicorp/go-plugin +github.com/hashicorp/go-plugin/internal/plugin +# github.com/hashicorp/go-retryablehttp v0.7.1 +## explicit; go 1.13 +github.com/hashicorp/go-retryablehttp +# github.com/hashicorp/go-rootcerts v1.0.2 +## explicit; go 1.12 +github.com/hashicorp/go-rootcerts +# github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 +## explicit; go 1.16 +github.com/hashicorp/go-secure-stdlib/mlock +# 
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 +## explicit; go 1.16 +github.com/hashicorp/go-secure-stdlib/parseutil +# github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 +## explicit; go 1.16 +github.com/hashicorp/go-secure-stdlib/strutil +# github.com/hashicorp/go-sockaddr v1.0.2 +## explicit +github.com/hashicorp/go-sockaddr +# github.com/hashicorp/go-uuid v1.0.3 +## explicit +github.com/hashicorp/go-uuid # github.com/hashicorp/go-version v1.6.0 ## explicit github.com/hashicorp/go-version @@ -445,12 +527,52 @@ github.com/hashicorp/go-version ## explicit; go 1.12 github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru +# github.com/hashicorp/hcl v1.0.0 +## explicit +github.com/hashicorp/hcl +github.com/hashicorp/hcl/hcl/ast +github.com/hashicorp/hcl/hcl/parser +github.com/hashicorp/hcl/hcl/scanner +github.com/hashicorp/hcl/hcl/strconv +github.com/hashicorp/hcl/hcl/token +github.com/hashicorp/hcl/json/parser +github.com/hashicorp/hcl/json/scanner +github.com/hashicorp/hcl/json/token +# github.com/hashicorp/vault/api v1.8.2 +## explicit; go 1.19 +github.com/hashicorp/vault/api +# github.com/hashicorp/vault/sdk v0.6.1 +## explicit; go 1.19 +github.com/hashicorp/vault/sdk/helper/certutil +github.com/hashicorp/vault/sdk/helper/compressutil +github.com/hashicorp/vault/sdk/helper/consts +github.com/hashicorp/vault/sdk/helper/cryptoutil +github.com/hashicorp/vault/sdk/helper/errutil +github.com/hashicorp/vault/sdk/helper/hclutil +github.com/hashicorp/vault/sdk/helper/jsonutil +github.com/hashicorp/vault/sdk/helper/license +github.com/hashicorp/vault/sdk/helper/locksutil +github.com/hashicorp/vault/sdk/helper/logging +github.com/hashicorp/vault/sdk/helper/pathmanager +github.com/hashicorp/vault/sdk/helper/pluginutil +github.com/hashicorp/vault/sdk/helper/strutil +github.com/hashicorp/vault/sdk/helper/wrapping +github.com/hashicorp/vault/sdk/logical +github.com/hashicorp/vault/sdk/physical +github.com/hashicorp/vault/sdk/physical/inmem +github.com/hashicorp/vault/sdk/version +# github.com/hashicorp/yamux v0.1.1 +## explicit; go 1.15 +github.com/hashicorp/yamux # github.com/imdario/mergo v0.3.12 ## explicit; go 1.13 github.com/imdario/mergo # github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 ## explicit github.com/jbenet/go-context/io +# github.com/jellydator/ttlcache/v2 v2.11.1 +## explicit; go 1.15 +github.com/jellydator/ttlcache/v2 # github.com/jenkins-x/go-scm v1.11.35 ## explicit; go 1.18 github.com/jenkins-x/go-scm/pkg/hmac @@ -508,6 +630,12 @@ github.com/letsencrypt/boulder/sa/proto github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter +# github.com/mattn/go-colorable v0.1.13 +## explicit; go 1.15 +github.com/mattn/go-colorable +# github.com/mattn/go-isatty v0.0.16 +## explicit; go 1.15 +github.com/mattn/go-isatty # github.com/matttproud/golang_protobuf_extensions v1.0.4 ## explicit; go 1.9 github.com/matttproud/golang_protobuf_extensions/pbutil @@ -517,6 +645,12 @@ 
github.com/mitchellh/copystructure # github.com/mitchellh/go-homedir v1.1.0 ## explicit github.com/mitchellh/go-homedir +# github.com/mitchellh/go-testing-interface v1.14.1 +## explicit; go 1.14 +github.com/mitchellh/go-testing-interface +# github.com/mitchellh/mapstructure v1.5.0 +## explicit; go 1.14 +github.com/mitchellh/mapstructure # github.com/mitchellh/reflectwalk v1.0.2 ## explicit github.com/mitchellh/reflectwalk @@ -533,6 +667,9 @@ github.com/modern-go/reflect2 # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 ## explicit github.com/munnerz/goautoneg +# github.com/oklog/run v1.1.0 +## explicit; go 1.13 +github.com/oklog/run # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest @@ -543,6 +680,10 @@ github.com/opencontainers/image-spec/specs-go/v1 # github.com/openzipkin/zipkin-go v0.3.0 ## explicit; go 1.14 github.com/openzipkin/zipkin-go/model +# github.com/pierrec/lz4 v2.6.1+incompatible +## explicit +github.com/pierrec/lz4 +github.com/pierrec/lz4/internal/xxh32 # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors @@ -576,6 +717,9 @@ github.com/prometheus/statsd_exporter/pkg/mapper/fsm # github.com/russross/blackfriday/v2 v2.1.0 ## explicit github.com/russross/blackfriday/v2 +# github.com/ryanuber/go-glob v1.0.0 +## explicit +github.com/ryanuber/go-glob # github.com/sergi/go-diff v1.2.0 ## explicit; go 1.12 github.com/sergi/go-diff/diffmatchpatch @@ -587,10 +731,15 @@ github.com/shurcooL/githubv4 github.com/shurcooL/graphql github.com/shurcooL/graphql/ident github.com/shurcooL/graphql/internal/jsonutil -# github.com/sigstore/sigstore v1.4.6 +# github.com/sigstore/sigstore v1.5.0 ## explicit; go 1.18 github.com/sigstore/sigstore/pkg/cryptoutils github.com/sigstore/sigstore/pkg/signature +github.com/sigstore/sigstore/pkg/signature/kms +github.com/sigstore/sigstore/pkg/signature/kms/aws +github.com/sigstore/sigstore/pkg/signature/kms/azure +github.com/sigstore/sigstore/pkg/signature/kms/gcp +github.com/sigstore/sigstore/pkg/signature/kms/hashivault github.com/sigstore/sigstore/pkg/signature/options github.com/sigstore/sigstore/pkg/signature/payload # github.com/sirupsen/logrus v1.9.0 @@ -689,11 +838,14 @@ go.uber.org/zap/internal/ztest go.uber.org/zap/zapcore go.uber.org/zap/zaptest go.uber.org/zap/zaptest/observer -# golang.org/x/crypto v0.3.0 +# golang.org/x/crypto v0.4.0 ## explicit; go 1.17 +golang.org/x/crypto/blake2b golang.org/x/crypto/blowfish golang.org/x/crypto/cast5 golang.org/x/crypto/chacha20 +golang.org/x/crypto/cryptobyte +golang.org/x/crypto/cryptobyte/asn1 golang.org/x/crypto/curve25519 golang.org/x/crypto/curve25519/internal/field golang.org/x/crypto/ed25519 @@ -716,7 +868,7 @@ golang.org/x/crypto/ssh/knownhosts golang.org/x/mod/internal/lazyregexp golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.2.0 +# golang.org/x/net v0.3.0 ## explicit; go 1.17 golang.org/x/net/context golang.org/x/net/context/ctxhttp @@ -729,7 +881,7 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/oauth2 v0.2.0 +# golang.org/x/oauth2 v0.3.0 ## 
explicit; go 1.17 golang.org/x/oauth2 golang.org/x/oauth2/authhandler @@ -743,7 +895,8 @@ golang.org/x/oauth2/jwt ## explicit golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.2.0 +golang.org/x/sync/singleflight +# golang.org/x/sys v0.3.0 ## explicit; go 1.17 golang.org/x/sys/cpu golang.org/x/sys/execabs @@ -751,10 +904,10 @@ golang.org/x/sys/internal/unsafeheader golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.2.0 +# golang.org/x/term v0.3.0 ## explicit; go 1.17 golang.org/x/term -# golang.org/x/text v0.4.0 +# golang.org/x/text v0.5.0 ## explicit; go 1.17 golang.org/x/text/cases golang.org/x/text/internal @@ -793,9 +946,19 @@ golang.org/x/tools/internal/typesinternal # gomodules.xyz/jsonpatch/v2 v2.2.0 ## explicit; go 1.12 gomodules.xyz/jsonpatch/v2 -# google.golang.org/api v0.103.0 +# google.golang.org/api v0.104.0 ## explicit; go 1.19 +google.golang.org/api/googleapi +google.golang.org/api/internal +google.golang.org/api/internal/impersonate +google.golang.org/api/internal/third_party/uritemplates +google.golang.org/api/iterator +google.golang.org/api/option +google.golang.org/api/option/internaloption google.golang.org/api/support/bundler +google.golang.org/api/transport/cert +google.golang.org/api/transport/grpc +google.golang.org/api/transport/internal/dca # google.golang.org/appengine v1.6.7 ## explicit; go 1.11 google.golang.org/appengine @@ -806,12 +969,22 @@ google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api +google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch +google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20221111202108-142d8a6fa32e +# google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6 ## explicit; go 1.19 +google.golang.org/genproto/googleapis/api +google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody +google.golang.org/genproto/googleapis/cloud/kms/v1 +google.golang.org/genproto/googleapis/cloud/location +google.golang.org/genproto/googleapis/iam/v1 +google.golang.org/genproto/googleapis/rpc/code +google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status +google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/protobuf/field_mask # google.golang.org/grpc v1.51.0 ## explicit; go 1.17 @@ -820,6 +993,8 @@ google.golang.org/grpc/attributes google.golang.org/grpc/backoff google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base +google.golang.org/grpc/balancer/grpclb +google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 @@ -827,10 +1002,21 @@ google.golang.org/grpc/channelz google.golang.org/grpc/codes google.golang.org/grpc/connectivity google.golang.org/grpc/credentials +google.golang.org/grpc/credentials/alts +google.golang.org/grpc/credentials/alts/internal +google.golang.org/grpc/credentials/alts/internal/authinfo +google.golang.org/grpc/credentials/alts/internal/conn +google.golang.org/grpc/credentials/alts/internal/handshaker +google.golang.org/grpc/credentials/alts/internal/handshaker/service +google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp +google.golang.org/grpc/credentials/google 
google.golang.org/grpc/credentials/insecure +google.golang.org/grpc/credentials/oauth google.golang.org/grpc/encoding google.golang.org/grpc/encoding/proto google.golang.org/grpc/grpclog +google.golang.org/grpc/health +google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff google.golang.org/grpc/internal/balancer/gracefulswitch @@ -840,6 +1026,7 @@ google.golang.org/grpc/internal/buffer google.golang.org/grpc/internal/channelz google.golang.org/grpc/internal/credentials google.golang.org/grpc/internal/envconfig +google.golang.org/grpc/internal/googlecloud google.golang.org/grpc/internal/grpclog google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync @@ -858,6 +1045,8 @@ google.golang.org/grpc/internal/transport/networktype google.golang.org/grpc/keepalive google.golang.org/grpc/metadata google.golang.org/grpc/peer +google.golang.org/grpc/reflection +google.golang.org/grpc/reflection/grpc_reflection_v1alpha google.golang.org/grpc/resolver google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats